Feb 18 00:34:17 crc systemd[1]: Starting Kubernetes Kubelet... Feb 18 00:34:17 crc restorecon[4687]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963 Feb 18 00:34:17 
crc restorecon[4687]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c574,c582 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Feb 18 
00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c440,c975 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c4,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 18 00:34:17 crc 
restorecon[4687]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 
Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c968,c969 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 
18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 18 00:34:17 
crc restorecon[4687]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 18 00:34:17 
crc restorecon[4687]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized 
by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c377,c642 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c0,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c336,c787 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 
00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Feb 18 00:34:17 crc 
restorecon[4687]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Feb 18 00:34:17 crc restorecon[4687]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 18 00:34:17 crc restorecon[4687]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 
00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 
00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc 
restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 18 00:34:18 crc restorecon[4687]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Feb 18 00:34:18 crc restorecon[4687]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Feb 18 00:34:18 crc restorecon[4687]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Feb 18 00:34:18 crc kubenswrapper[4791]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Feb 18 00:34:18 crc kubenswrapper[4791]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Feb 18 00:34:18 crc kubenswrapper[4791]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Feb 18 00:34:18 crc kubenswrapper[4791]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
Feb 18 00:34:18 crc kubenswrapper[4791]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI. Feb 18 00:34:18 crc kubenswrapper[4791]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.843383 4791 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime" Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.847594 4791 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.847900 4791 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848034 4791 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848528 4791 feature_gate.go:330] unrecognized feature gate: NewOLM Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848555 4791 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848565 4791 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848576 4791 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848585 4791 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848594 4791 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848602 4791 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848610 4791 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848621 4791 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848630 4791 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848638 4791 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848646 4791 feature_gate.go:330] unrecognized feature gate: OVNObservability Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848654 4791 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848662 4791 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848670 4791 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848678 4791 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848686 4791 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848699 
4791 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848710 4791 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848720 4791 feature_gate.go:330] unrecognized feature gate: Example Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848730 4791 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848742 4791 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848752 4791 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848762 4791 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848771 4791 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848781 4791 feature_gate.go:330] unrecognized feature gate: PinnedImages Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848789 4791 feature_gate.go:330] unrecognized feature gate: GatewayAPI Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848797 4791 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848806 4791 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848815 4791 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848825 4791 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848836 4791 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
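Editor's note: the long run of "unrecognized feature gate" warnings comes from gates (NewOLM, GatewayAPI, AlibabaPlatform, and so on) that appear to be OpenShift cluster-level gates the upstream kubelet does not register; the kubelet logs them and carries on. Because the same gate set is parsed more than once during startup, identical warning blocks recur further down. A small sketch for collapsing them when reading a saved capture of this journal (the file name is a placeholder, e.g. from journalctl -u kubelet > kubelet.log):

import re
from collections import Counter

# Count each distinct "unrecognized feature gate" warning in a saved journal
# capture, so the repeated blocks collapse to one line per gate.
PATTERN = re.compile(r"unrecognized feature gate: (\S+)")

def unrecognized_gates(journal_path):
    counts = Counter()
    with open(journal_path, encoding="utf-8", errors="replace") as fh:
        for line in fh:
            for gate in PATTERN.findall(line):
                counts[gate] += 1
    return counts

if __name__ == "__main__":
    for gate, n in sorted(unrecognized_gates("kubelet.log").items()):
        print(f"{gate}: seen {n} times")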
Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848846 4791 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848855 4791 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848864 4791 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848872 4791 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848880 4791 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848888 4791 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848896 4791 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848904 4791 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848912 4791 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848922 4791 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848932 4791 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848942 4791 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848952 4791 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848964 4791 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.848974 4791 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.849035 4791 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.849047 4791 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.849056 4791 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.849069 4791 feature_gate.go:330] unrecognized feature gate: SignatureStores Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.849078 4791 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.849086 4791 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.849094 4791 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.849102 4791 feature_gate.go:330] unrecognized feature gate: PlatformOperators Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.849112 4791 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.849120 4791 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.849128 4791 feature_gate.go:330] 
unrecognized feature gate: ClusterAPIInstallIBMCloud Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.849137 4791 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.849146 4791 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.849196 4791 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.849207 4791 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.849218 4791 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.849229 4791 feature_gate.go:330] unrecognized feature gate: InsightsConfig Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.849238 4791 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.849245 4791 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.849253 4791 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.849267 4791 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850227 4791 flags.go:64] FLAG: --address="0.0.0.0" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850258 4791 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850278 4791 flags.go:64] FLAG: --anonymous-auth="true" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850291 4791 flags.go:64] FLAG: --application-metrics-count-limit="100" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850304 4791 flags.go:64] FLAG: --authentication-token-webhook="false" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850314 4791 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850325 4791 flags.go:64] FLAG: --authorization-mode="AlwaysAllow" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850337 4791 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850347 4791 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850356 4791 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850366 4791 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850376 4791 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850386 4791 flags.go:64] FLAG: --cgroup-driver="cgroupfs" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850395 4791 flags.go:64] FLAG: --cgroup-root="" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850405 4791 flags.go:64] FLAG: --cgroups-per-qos="true" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850414 4791 flags.go:64] FLAG: --client-ca-file="" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850422 4791 flags.go:64] FLAG: --cloud-config="" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850431 4791 
flags.go:64] FLAG: --cloud-provider="" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850440 4791 flags.go:64] FLAG: --cluster-dns="[]" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850463 4791 flags.go:64] FLAG: --cluster-domain="" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850472 4791 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850482 4791 flags.go:64] FLAG: --config-dir="" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850490 4791 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850500 4791 flags.go:64] FLAG: --container-log-max-files="5" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850519 4791 flags.go:64] FLAG: --container-log-max-size="10Mi" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850528 4791 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850538 4791 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850548 4791 flags.go:64] FLAG: --containerd-namespace="k8s.io" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850556 4791 flags.go:64] FLAG: --contention-profiling="false" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850567 4791 flags.go:64] FLAG: --cpu-cfs-quota="true" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850576 4791 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850586 4791 flags.go:64] FLAG: --cpu-manager-policy="none" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850595 4791 flags.go:64] FLAG: --cpu-manager-policy-options="" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850607 4791 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850617 4791 flags.go:64] FLAG: --enable-controller-attach-detach="true" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850626 4791 flags.go:64] FLAG: --enable-debugging-handlers="true" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850634 4791 flags.go:64] FLAG: --enable-load-reader="false" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850644 4791 flags.go:64] FLAG: --enable-server="true" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850652 4791 flags.go:64] FLAG: --enforce-node-allocatable="[pods]" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850665 4791 flags.go:64] FLAG: --event-burst="100" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850675 4791 flags.go:64] FLAG: --event-qps="50" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850684 4791 flags.go:64] FLAG: --event-storage-age-limit="default=0" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850693 4791 flags.go:64] FLAG: --event-storage-event-limit="default=0" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850702 4791 flags.go:64] FLAG: --eviction-hard="" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850713 4791 flags.go:64] FLAG: --eviction-max-pod-grace-period="0" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850722 4791 flags.go:64] FLAG: --eviction-minimum-reclaim="" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850731 4791 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 
00:34:18.850746 4791 flags.go:64] FLAG: --eviction-soft="" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850756 4791 flags.go:64] FLAG: --eviction-soft-grace-period="" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850765 4791 flags.go:64] FLAG: --exit-on-lock-contention="false" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850774 4791 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850783 4791 flags.go:64] FLAG: --experimental-mounter-path="" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850792 4791 flags.go:64] FLAG: --fail-cgroupv1="false" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850801 4791 flags.go:64] FLAG: --fail-swap-on="true" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850810 4791 flags.go:64] FLAG: --feature-gates="" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850821 4791 flags.go:64] FLAG: --file-check-frequency="20s" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850830 4791 flags.go:64] FLAG: --global-housekeeping-interval="1m0s" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850840 4791 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850849 4791 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850859 4791 flags.go:64] FLAG: --healthz-port="10248" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850869 4791 flags.go:64] FLAG: --help="false" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850878 4791 flags.go:64] FLAG: --hostname-override="" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850887 4791 flags.go:64] FLAG: --housekeeping-interval="10s" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850896 4791 flags.go:64] FLAG: --http-check-frequency="20s" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850905 4791 flags.go:64] FLAG: --image-credential-provider-bin-dir="" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850915 4791 flags.go:64] FLAG: --image-credential-provider-config="" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850925 4791 flags.go:64] FLAG: --image-gc-high-threshold="85" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850934 4791 flags.go:64] FLAG: --image-gc-low-threshold="80" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850943 4791 flags.go:64] FLAG: --image-service-endpoint="" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850951 4791 flags.go:64] FLAG: --kernel-memcg-notification="false" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850960 4791 flags.go:64] FLAG: --kube-api-burst="100" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850970 4791 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850980 4791 flags.go:64] FLAG: --kube-api-qps="50" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850989 4791 flags.go:64] FLAG: --kube-reserved="" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.850998 4791 flags.go:64] FLAG: --kube-reserved-cgroup="" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851006 4791 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851018 4791 flags.go:64] FLAG: --kubelet-cgroups="" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851026 4791 flags.go:64] 
FLAG: --local-storage-capacity-isolation="true" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851035 4791 flags.go:64] FLAG: --lock-file="" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851044 4791 flags.go:64] FLAG: --log-cadvisor-usage="false" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851053 4791 flags.go:64] FLAG: --log-flush-frequency="5s" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851063 4791 flags.go:64] FLAG: --log-json-info-buffer-size="0" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851086 4791 flags.go:64] FLAG: --log-json-split-stream="false" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851095 4791 flags.go:64] FLAG: --log-text-info-buffer-size="0" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851105 4791 flags.go:64] FLAG: --log-text-split-stream="false" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851114 4791 flags.go:64] FLAG: --logging-format="text" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851123 4791 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851133 4791 flags.go:64] FLAG: --make-iptables-util-chains="true" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851142 4791 flags.go:64] FLAG: --manifest-url="" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851179 4791 flags.go:64] FLAG: --manifest-url-header="" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851200 4791 flags.go:64] FLAG: --max-housekeeping-interval="15s" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851209 4791 flags.go:64] FLAG: --max-open-files="1000000" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851221 4791 flags.go:64] FLAG: --max-pods="110" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851230 4791 flags.go:64] FLAG: --maximum-dead-containers="-1" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851239 4791 flags.go:64] FLAG: --maximum-dead-containers-per-container="1" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851249 4791 flags.go:64] FLAG: --memory-manager-policy="None" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851257 4791 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851267 4791 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851276 4791 flags.go:64] FLAG: --node-ip="192.168.126.11" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851285 4791 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851310 4791 flags.go:64] FLAG: --node-status-max-images="50" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851319 4791 flags.go:64] FLAG: --node-status-update-frequency="10s" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851329 4791 flags.go:64] FLAG: --oom-score-adj="-999" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851338 4791 flags.go:64] FLAG: --pod-cidr="" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851347 4791 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851359 4791 flags.go:64] FLAG: --pod-manifest-path="" Feb 18 00:34:18 crc 
kubenswrapper[4791]: I0218 00:34:18.851368 4791 flags.go:64] FLAG: --pod-max-pids="-1" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851378 4791 flags.go:64] FLAG: --pods-per-core="0" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851386 4791 flags.go:64] FLAG: --port="10250" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851396 4791 flags.go:64] FLAG: --protect-kernel-defaults="false" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851405 4791 flags.go:64] FLAG: --provider-id="" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851414 4791 flags.go:64] FLAG: --qos-reserved="" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851423 4791 flags.go:64] FLAG: --read-only-port="10255" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851432 4791 flags.go:64] FLAG: --register-node="true" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851441 4791 flags.go:64] FLAG: --register-schedulable="true" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851450 4791 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851465 4791 flags.go:64] FLAG: --registry-burst="10" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851474 4791 flags.go:64] FLAG: --registry-qps="5" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851484 4791 flags.go:64] FLAG: --reserved-cpus="" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851493 4791 flags.go:64] FLAG: --reserved-memory="" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851504 4791 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851514 4791 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851524 4791 flags.go:64] FLAG: --rotate-certificates="false" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851533 4791 flags.go:64] FLAG: --rotate-server-certificates="false" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851542 4791 flags.go:64] FLAG: --runonce="false" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851551 4791 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851560 4791 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851569 4791 flags.go:64] FLAG: --seccomp-default="false" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851579 4791 flags.go:64] FLAG: --serialize-image-pulls="true" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851588 4791 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851597 4791 flags.go:64] FLAG: --storage-driver-db="cadvisor" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851606 4791 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851616 4791 flags.go:64] FLAG: --storage-driver-password="root" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851625 4791 flags.go:64] FLAG: --storage-driver-secure="false" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851634 4791 flags.go:64] FLAG: --storage-driver-table="stats" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851643 4791 flags.go:64] FLAG: --storage-driver-user="root" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851652 4791 flags.go:64] FLAG: 
--streaming-connection-idle-timeout="4h0m0s" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851661 4791 flags.go:64] FLAG: --sync-frequency="1m0s" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851671 4791 flags.go:64] FLAG: --system-cgroups="" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851680 4791 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851694 4791 flags.go:64] FLAG: --system-reserved-cgroup="" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851703 4791 flags.go:64] FLAG: --tls-cert-file="" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851713 4791 flags.go:64] FLAG: --tls-cipher-suites="[]" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851725 4791 flags.go:64] FLAG: --tls-min-version="" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851734 4791 flags.go:64] FLAG: --tls-private-key-file="" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851743 4791 flags.go:64] FLAG: --topology-manager-policy="none" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851752 4791 flags.go:64] FLAG: --topology-manager-policy-options="" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851761 4791 flags.go:64] FLAG: --topology-manager-scope="container" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851770 4791 flags.go:64] FLAG: --v="2" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851782 4791 flags.go:64] FLAG: --version="false" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851794 4791 flags.go:64] FLAG: --vmodule="" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851806 4791 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.851815 4791 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852034 4791 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852044 4791 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852054 4791 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852109 4791 feature_gate.go:330] unrecognized feature gate: Example Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852119 4791 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852127 4791 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852135 4791 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852144 4791 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852173 4791 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852182 4791 feature_gate.go:330] unrecognized feature gate: SignatureStores Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852189 4791 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852197 4791 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS 
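Editor's note: the flags.go:64 "FLAG:" lines above are the kubelet dumping every command-line flag with its effective value at startup (this node runs with --v=2). Parsing that dump into a dictionary makes it easy to diff one node's effective flags against another node or against the values you expect from the rendered MachineConfig. A rough sketch against a saved journal capture, with a placeholder file name:

import re

# Extract the kubelet's startup "FLAG: --name=<value>" pairs into a dict.
FLAG_RE = re.compile(r'FLAG: --([A-Za-z0-9-]+)="(.*?)"')

def kubelet_flags(journal_path):
    flags = {}
    with open(journal_path, encoding="utf-8", errors="replace") as fh:
        for line in fh:
            for name, value in FLAG_RE.findall(line):
                flags[name] = value
    return flags

if __name__ == "__main__":
    flags = kubelet_flags("kubelet.log")
    print(flags.get("config"))           # /etc/kubernetes/kubelet.conf
    print(flags.get("node-ip"))          # 192.168.126.11
    print(flags.get("system-reserved"))  # cpu=200m,ephemeral-storage=350Mi,memory=350Mi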
Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852205 4791 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852215 4791 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852225 4791 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852235 4791 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852244 4791 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852253 4791 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852264 4791 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852273 4791 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852282 4791 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852291 4791 feature_gate.go:330] unrecognized feature gate: PinnedImages Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852299 4791 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852308 4791 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852317 4791 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852326 4791 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852334 4791 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852342 4791 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852350 4791 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852358 4791 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852368 4791 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852378 4791 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852388 4791 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852397 4791 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852406 4791 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852414 4791 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852422 4791 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852432 4791 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852444 4791 feature_gate.go:330] unrecognized feature gate: GatewayAPI Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852453 4791 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852463 4791 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852472 4791 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852481 4791 feature_gate.go:330] unrecognized feature gate: InsightsConfig Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852489 4791 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852498 4791 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852508 4791 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852516 4791 feature_gate.go:330] unrecognized feature gate: NewOLM Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852524 4791 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852533 4791 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852542 4791 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852550 4791 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852558 4791 feature_gate.go:330] unrecognized feature gate: PlatformOperators Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852587 4791 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852598 4791 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852607 4791 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852616 4791 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852625 4791 
feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852633 4791 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852641 4791 feature_gate.go:330] unrecognized feature gate: OVNObservability Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852649 4791 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852667 4791 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852674 4791 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852683 4791 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852690 4791 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852699 4791 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852707 4791 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852715 4791 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852723 4791 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852731 4791 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852740 4791 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.852748 4791 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.852773 4791 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.863341 4791 server.go:491] "Kubelet version" kubeletVersion="v1.31.5" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.863411 4791 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863508 4791 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863522 4791 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863526 4791 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863531 4791 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863535 4791 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS 
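Editor's note: after all the warnings, feature_gate.go:386 logs the feature gates the kubelet actually applied, printed in Go's map syntax ({map[CloudDualStackNodeIPs:true ... VolumeAttributesClass:false]}); the same summary appears twice more below with identical contents. A sketch that turns one of those lines into a Python dict for comparison across nodes or restarts:

import re

# Parse the Go-syntax map from a "feature gates: {map[...]}" log line into a
# dict of gate name -> bool.
MAP_RE = re.compile(r"feature gates: \{map\[(.*?)\]\}")

def parse_feature_gates(line):
    match = MAP_RE.search(line)
    if not match:
        return {}
    gates = {}
    for pair in match.group(1).split():
        name, _, value = pair.partition(":")
        gates[name] = value == "true"
    return gates

# Abbreviated sample taken from the summary line in this log.
sample = ("feature gates: {map[CloudDualStackNodeIPs:true "
          "DisableKubeletCloudCredentialProviders:true KMSv1:true "
          "NodeSwap:false ValidatingAdmissionPolicy:true]}")
print(parse_feature_gates(sample))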
Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863540 4791 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863547 4791 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863566 4791 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863571 4791 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863577 4791 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863586 4791 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863591 4791 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863595 4791 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863599 4791 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863604 4791 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863609 4791 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863614 4791 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863618 4791 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863623 4791 feature_gate.go:330] unrecognized feature gate: PinnedImages Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863648 4791 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863653 4791 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863657 4791 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863662 4791 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863666 4791 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863670 4791 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863675 4791 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863680 4791 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863684 4791 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863688 4791 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863693 4791 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Feb 18 00:34:18 crc 
kubenswrapper[4791]: W0218 00:34:18.863697 4791 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863701 4791 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863706 4791 feature_gate.go:330] unrecognized feature gate: NewOLM Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863725 4791 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863732 4791 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863737 4791 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863742 4791 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863747 4791 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863753 4791 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863758 4791 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863763 4791 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863768 4791 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863773 4791 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863778 4791 feature_gate.go:330] unrecognized feature gate: PlatformOperators Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863783 4791 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863805 4791 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863811 4791 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863816 4791 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863820 4791 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863825 4791 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863829 4791 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863838 4791 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863842 4791 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863846 4791 feature_gate.go:330] unrecognized feature gate: OVNObservability Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863851 4791 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863855 4791 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863859 4791 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863864 4791 feature_gate.go:330] unrecognized feature gate: SignatureStores Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863887 4791 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863892 4791 feature_gate.go:330] unrecognized feature gate: InsightsConfig Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863897 4791 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863903 4791 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863907 4791 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863912 4791 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863916 4791 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863920 4791 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863924 4791 feature_gate.go:330] unrecognized feature gate: GatewayAPI Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863937 4791 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863941 4791 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863962 4791 feature_gate.go:330] unrecognized feature gate: Example Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.863970 4791 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.863978 4791 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864194 4791 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864206 4791 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864211 4791 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864216 4791 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864220 4791 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864225 4791 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864231 4791 feature_gate.go:330] unrecognized feature gate: NewOLM Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864253 4791 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864259 4791 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864264 4791 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864269 4791 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864274 4791 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864279 4791 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864283 4791 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864287 4791 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864292 4791 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864298 4791 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864303 4791 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864308 4791 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864330 4791 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864336 4791 feature_gate.go:330] unrecognized feature gate: PinnedImages Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864341 4791 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864346 4791 feature_gate.go:330] unrecognized feature gate: PlatformOperators Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864351 4791 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864356 4791 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864361 4791 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864365 4791 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864373 4791 feature_gate.go:330] unrecognized feature gate: OVNObservability Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864378 4791 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864383 4791 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864387 4791 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864409 4791 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864414 4791 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864419 4791 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864424 4791 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864428 4791 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864432 4791 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864436 4791 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864440 4791 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864444 4791 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864449 4791 feature_gate.go:330] unrecognized feature gate: SignatureStores Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864453 4791 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864458 4791 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864462 4791 feature_gate.go:330] unrecognized feature gate: GatewayAPI Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864466 4791 feature_gate.go:330] unrecognized feature gate: InsightsConfig Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 
00:34:18.864489 4791 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864494 4791 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864498 4791 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864502 4791 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864506 4791 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864510 4791 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864514 4791 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864518 4791 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864522 4791 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864526 4791 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864530 4791 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864534 4791 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864538 4791 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864542 4791 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864547 4791 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864566 4791 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864571 4791 feature_gate.go:330] unrecognized feature gate: Example Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864575 4791 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864579 4791 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864584 4791 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864588 4791 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864592 4791 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864596 4791 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864600 4791 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864605 4791 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.864609 4791 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Feb 18 00:34:18 crc 
kubenswrapper[4791]: I0218 00:34:18.864616 4791 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.864832 4791 server.go:940] "Client rotation is on, will bootstrap in background" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.869496 4791 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.869844 4791 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem". Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.871809 4791 server.go:997] "Starting client certificate rotation" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.871837 4791 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.872145 4791 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-11-26 15:12:40.368496158 +0000 UTC Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.872257 4791 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.895323 4791 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.898184 4791 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Feb 18 00:34:18 crc kubenswrapper[4791]: E0218 00:34:18.898930 4791 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.113:6443: connect: connection refused" logger="UnhandledError" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.911593 4791 log.go:25] "Validated CRI v1 runtime API" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.948892 4791 log.go:25] "Validated CRI v1 image API" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.950870 4791 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.958263 4791 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2026-02-18-00-29-41-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3] Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.958301 4791 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs 
blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}] Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.978497 4791 manager.go:217] Machine: {Timestamp:2026-02-18 00:34:18.975231064 +0000 UTC m=+0.543244274 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654128640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:e1be6815-4e1d-4d7f-9918-bb66fef09f86 BootID:49ba9a08-9d87-4573-b809-fff0547601af Filesystems:[{Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:9d:4c:4c Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:9d:4c:4c Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:b9:e4:3e Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:8d:02:8d Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:56:2f:ea Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:61:6f:47 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:b6:33:59:cc:b3:8e Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:1e:f8:5b:81:03:bc Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654128640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified 
Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None} Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.978774 4791 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available. 
Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.979005 4791 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:} Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.979358 4791 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.979561 4791 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.979602 4791 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.979820 4791 topology_manager.go:138] "Creating topology manager with none policy" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.979832 4791 container_manager_linux.go:303] "Creating device plugin manager" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.980430 4791 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.980468 4791 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.980672 4791 state_mem.go:36] "Initialized new in-memory state store" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.980766 4791 server.go:1245] "Using root directory" path="/var/lib/kubelet" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.984782 4791 kubelet.go:418] "Attempting to sync node with API server" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.984808 4791 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests" 
Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.984835 4791 file.go:69] "Watching path" path="/etc/kubernetes/manifests" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.984849 4791 kubelet.go:324] "Adding apiserver pod source" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.984861 4791 apiserver.go:42] "Waiting for node sync before watching apiserver pods" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.988961 4791 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.990344 4791 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem". Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.992478 4791 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode" Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.995369 4791 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.995487 4791 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.995620 4791 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.995637 4791 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.995645 4791 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.995660 4791 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.995669 4791 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.995677 4791 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.995697 4791 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.995710 4791 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.995723 4791 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.995735 4791 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.995742 4791 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume" Feb 18 00:34:18 crc kubenswrapper[4791]: E0218 00:34:18.995552 4791 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.113:6443: connect: connection refused" logger="UnhandledError" Feb 18 00:34:18 crc kubenswrapper[4791]: W0218 00:34:18.995731 4791 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list 
*v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Feb 18 00:34:18 crc kubenswrapper[4791]: E0218 00:34:18.995823 4791 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.113:6443: connect: connection refused" logger="UnhandledError" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.996356 4791 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi" Feb 18 00:34:18 crc kubenswrapper[4791]: I0218 00:34:18.996941 4791 server.go:1280] "Started kubelet" Feb 18 00:34:18 crc systemd[1]: Started Kubernetes Kubelet. Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.000771 4791 server.go:163] "Starting to listen" address="0.0.0.0" port=10250 Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.000822 4791 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.001286 4791 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.001782 4791 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.006682 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.006724 4791 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.006858 4791 server.go:460] "Adding debug handlers to kubelet server" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.007213 4791 volume_manager.go:287] "The desired_state_of_world populator starts" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.007282 4791 volume_manager.go:289] "Starting Kubelet Volume Manager" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.007439 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-14 06:40:31.014072093 +0000 UTC Feb 18 00:34:19 crc kubenswrapper[4791]: E0218 00:34:19.007328 4791 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 18 00:34:19 crc kubenswrapper[4791]: E0218 00:34:19.006411 4791 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.113:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.1895300c92916774 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-18 00:34:18.996901748 +0000 UTC m=+0.564914918,LastTimestamp:2026-02-18 00:34:18.996901748 +0000 UTC m=+0.564914918,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 
UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.011140 4791 desired_state_of_world_populator.go:146] "Desired state populator starts to run" Feb 18 00:34:19 crc kubenswrapper[4791]: E0218 00:34:19.011938 4791 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" interval="200ms" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.012355 4791 factory.go:153] Registering CRI-O factory Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.012376 4791 factory.go:221] Registration of the crio container factory successfully Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.012442 4791 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.012451 4791 factory.go:55] Registering systemd factory Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.012458 4791 factory.go:221] Registration of the systemd container factory successfully Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.012473 4791 factory.go:103] Registering Raw factory Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.012486 4791 manager.go:1196] Started watching for new ooms in manager Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.013501 4791 manager.go:319] Starting recovery of all containers Feb 18 00:34:19 crc kubenswrapper[4791]: W0218 00:34:19.016079 4791 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Feb 18 00:34:19 crc kubenswrapper[4791]: E0218 00:34:19.016195 4791 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.113:6443: connect: connection refused" logger="UnhandledError" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.021479 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.021536 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.021554 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.021566 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.021579 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.021590 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.021601 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.021617 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.021632 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025030 4791 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025078 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025101 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025118 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025134 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025182 4791 
reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025197 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025217 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025230 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025260 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025272 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025291 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025307 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025319 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025332 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025346 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025358 4791 reconstruct.go:130] "Volume is marked 
as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025371 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025395 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025408 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025420 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025438 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025452 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025463 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025475 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025497 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025511 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025528 4791 reconstruct.go:130] "Volume is marked as 
uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025545 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025561 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025574 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025593 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025606 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025617 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025630 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025641 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025654 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025666 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025679 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the 
actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025691 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025703 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025729 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025741 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025753 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025771 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025784 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025797 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025810 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025823 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025843 4791 reconstruct.go:130] "Volume is marked as uncertain and added into 
the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025861 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025874 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025886 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.025898 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.026316 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.026342 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.026367 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.026381 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.026395 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.026415 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.026429 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.026447 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.026461 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.026474 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.026493 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.026509 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.026528 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.026541 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.026556 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.026575 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.026590 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.026608 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" 
volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.026620 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.026633 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.026704 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.026721 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.026748 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.026769 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.026789 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.026817 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.026830 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.026845 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.029717 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" 
volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.029747 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.029776 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.029792 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.029807 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.029827 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.029842 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.029861 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.029875 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.029889 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.029909 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.029922 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" 
volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.029969 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030003 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030071 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030100 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030118 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030142 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030176 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030197 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030238 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030258 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030277 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" 
volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030292 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030311 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030326 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030346 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030361 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030381 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030396 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030413 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030433 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030452 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030468 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" 
volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030492 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030507 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030526 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030548 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030563 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030584 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030598 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030612 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030631 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030646 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030666 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" 
volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030684 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030698 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030718 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030732 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030752 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030767 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030781 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030802 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030820 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030833 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030855 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" 
volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030868 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.030889 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.031135 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.031168 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.031192 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.031208 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.031239 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.031255 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.031277 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.031290 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.031300 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" 
volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.031321 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.031336 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.031346 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.031377 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.031387 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.031396 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.031409 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.031425 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.031438 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.031449 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.031458 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" 
volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.031473 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.031483 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.031496 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.031510 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.031525 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.031538 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.031549 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.031565 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.031576 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.031587 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.031599 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" 
volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.031611 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.031628 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.031663 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.031674 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.031686 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.032707 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.032727 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.032741 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.032752 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.032766 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.032778 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" 
volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.032796 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.032810 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.032823 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.032847 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.032859 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.032870 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.032886 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.032896 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.032910 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.032921 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.032932 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" 
volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.032946 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.032957 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.032971 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.032981 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.032992 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.033007 4791 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.033017 4791 reconstruct.go:97] "Volume reconstruction finished" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.033025 4791 reconciler.go:26] "Reconciler: start to sync state" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.033378 4791 manager.go:324] Recovery completed Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.043818 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.046415 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.046462 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.046477 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.050801 4791 cpu_manager.go:225] "Starting CPU manager" policy="none" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.050822 4791 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.050841 4791 state_mem.go:36] "Initialized new in-memory state store" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.057447 4791 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv4" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.059545 4791 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv6" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.059871 4791 status_manager.go:217] "Starting to sync pod status with apiserver" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.059905 4791 kubelet.go:2335] "Starting kubelet main sync loop" Feb 18 00:34:19 crc kubenswrapper[4791]: E0218 00:34:19.059951 4791 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Feb 18 00:34:19 crc kubenswrapper[4791]: W0218 00:34:19.063190 4791 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Feb 18 00:34:19 crc kubenswrapper[4791]: E0218 00:34:19.063274 4791 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.113:6443: connect: connection refused" logger="UnhandledError" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.069582 4791 policy_none.go:49] "None policy: Start" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.070501 4791 memory_manager.go:170] "Starting memorymanager" policy="None" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.070533 4791 state_mem.go:35] "Initializing new in-memory state store" Feb 18 00:34:19 crc kubenswrapper[4791]: E0218 00:34:19.107718 4791 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.131324 4791 manager.go:334] "Starting Device Plugin manager" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.131371 4791 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.131383 4791 server.go:79] "Starting device plugin registration server" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.131740 4791 eviction_manager.go:189] "Eviction manager: starting control loop" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.131756 4791 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.131887 4791 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.131995 4791 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.132003 4791 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Feb 18 00:34:19 crc kubenswrapper[4791]: E0218 00:34:19.139339 4791 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.160543 4791 kubelet.go:2421] "SyncLoop ADD" source="file" 
pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"] Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.160639 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.161613 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.161650 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.161661 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.161789 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.162677 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.162691 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.162711 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.162713 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.162812 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.162929 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.163048 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.163088 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.163297 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.163320 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.163333 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.163591 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.163608 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.163619 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.163641 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.163659 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.163668 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.163690 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.163838 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.163881 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.164245 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.164283 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.164293 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.164405 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.164467 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.164491 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.165196 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.165218 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.165227 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.165255 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.165267 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.165275 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.165285 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.165299 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.165306 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.165379 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.165396 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.165893 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.165921 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.165932 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:19 crc kubenswrapper[4791]: E0218 00:34:19.212941 4791 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" interval="400ms" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.232519 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.233540 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.233592 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.233613 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.233658 4791 kubelet_node_status.go:76] "Attempting to register node" node="crc" Feb 18 00:34:19 crc kubenswrapper[4791]: E0218 00:34:19.234137 4791 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.113:6443: connect: connection refused" node="crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.234246 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.234269 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.234293 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.234315 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: 
\"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.234337 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.234356 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.234403 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.234422 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.234444 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.234483 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.234500 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.234519 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.234539 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: 
\"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.234560 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.234578 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.335409 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.335452 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.335471 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.335486 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.335502 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.335515 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.335527 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.335541 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: 
\"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.335554 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.335568 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.335582 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.335595 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.335608 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.335622 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.335621 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.335688 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.335714 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 
00:34:19.335637 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.335739 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.335759 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.335776 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.335792 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.335823 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.335840 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.335854 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.335869 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.335883 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " 
pod="openshift-etcd/etcd-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.335897 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.335935 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.335729 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.434364 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.435966 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.436014 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.436024 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.436045 4791 kubelet_node_status.go:76] "Attempting to register node" node="crc" Feb 18 00:34:19 crc kubenswrapper[4791]: E0218 00:34:19.436506 4791 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.113:6443: connect: connection refused" node="crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.507440 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.515028 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.535010 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.543037 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.544077 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 18 00:34:19 crc kubenswrapper[4791]: W0218 00:34:19.551299 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-767a046d47002be77b387059a75a59604326bf91ded8a067d44225bcd68a0d2b WatchSource:0}: Error finding container 767a046d47002be77b387059a75a59604326bf91ded8a067d44225bcd68a0d2b: Status 404 returned error can't find the container with id 767a046d47002be77b387059a75a59604326bf91ded8a067d44225bcd68a0d2b Feb 18 00:34:19 crc kubenswrapper[4791]: W0218 00:34:19.552851 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-6e13e0a708a44aaadf6bcf4940d57472388334330fe210f061d94681ad7de841 WatchSource:0}: Error finding container 6e13e0a708a44aaadf6bcf4940d57472388334330fe210f061d94681ad7de841: Status 404 returned error can't find the container with id 6e13e0a708a44aaadf6bcf4940d57472388334330fe210f061d94681ad7de841 Feb 18 00:34:19 crc kubenswrapper[4791]: W0218 00:34:19.569527 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-999dbc04e6f9547163b07b5bd5c89761ed7da8e19459dd7717f024d1a61c8268 WatchSource:0}: Error finding container 999dbc04e6f9547163b07b5bd5c89761ed7da8e19459dd7717f024d1a61c8268: Status 404 returned error can't find the container with id 999dbc04e6f9547163b07b5bd5c89761ed7da8e19459dd7717f024d1a61c8268 Feb 18 00:34:19 crc kubenswrapper[4791]: W0218 00:34:19.573128 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-84f89a7d1d8e732497d66b827bac5f5e43ad753202b5a4f617833f1dbd2546e4 WatchSource:0}: Error finding container 84f89a7d1d8e732497d66b827bac5f5e43ad753202b5a4f617833f1dbd2546e4: Status 404 returned error can't find the container with id 84f89a7d1d8e732497d66b827bac5f5e43ad753202b5a4f617833f1dbd2546e4 Feb 18 00:34:19 crc kubenswrapper[4791]: E0218 00:34:19.614018 4791 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" interval="800ms" Feb 18 00:34:19 crc kubenswrapper[4791]: W0218 00:34:19.829497 4791 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Feb 18 00:34:19 crc kubenswrapper[4791]: E0218 00:34:19.829586 4791 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.113:6443: connect: connection refused" logger="UnhandledError" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.836906 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.838265 4791 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.838297 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.838308 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:19 crc kubenswrapper[4791]: I0218 00:34:19.838333 4791 kubelet_node_status.go:76] "Attempting to register node" node="crc" Feb 18 00:34:19 crc kubenswrapper[4791]: E0218 00:34:19.838771 4791 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.113:6443: connect: connection refused" node="crc" Feb 18 00:34:19 crc kubenswrapper[4791]: W0218 00:34:19.923465 4791 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Feb 18 00:34:19 crc kubenswrapper[4791]: E0218 00:34:19.923548 4791 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.113:6443: connect: connection refused" logger="UnhandledError" Feb 18 00:34:20 crc kubenswrapper[4791]: I0218 00:34:20.002300 4791 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Feb 18 00:34:20 crc kubenswrapper[4791]: I0218 00:34:20.008304 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-09 23:53:09.567392173 +0000 UTC Feb 18 00:34:20 crc kubenswrapper[4791]: I0218 00:34:20.069656 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"999dbc04e6f9547163b07b5bd5c89761ed7da8e19459dd7717f024d1a61c8268"} Feb 18 00:34:20 crc kubenswrapper[4791]: I0218 00:34:20.070477 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"6e13e0a708a44aaadf6bcf4940d57472388334330fe210f061d94681ad7de841"} Feb 18 00:34:20 crc kubenswrapper[4791]: I0218 00:34:20.072139 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"767a046d47002be77b387059a75a59604326bf91ded8a067d44225bcd68a0d2b"} Feb 18 00:34:20 crc kubenswrapper[4791]: I0218 00:34:20.073521 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"84f89a7d1d8e732497d66b827bac5f5e43ad753202b5a4f617833f1dbd2546e4"} Feb 18 00:34:20 crc kubenswrapper[4791]: I0218 00:34:20.074820 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"9a79e1be035c73a88038aa628f880835b0c68ac70db154cf1d7aa30707d2996e"} Feb 18 00:34:20 crc kubenswrapper[4791]: W0218 00:34:20.395856 4791 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Feb 18 00:34:20 crc kubenswrapper[4791]: E0218 00:34:20.395925 4791 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.113:6443: connect: connection refused" logger="UnhandledError" Feb 18 00:34:20 crc kubenswrapper[4791]: E0218 00:34:20.415289 4791 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" interval="1.6s" Feb 18 00:34:20 crc kubenswrapper[4791]: W0218 00:34:20.634649 4791 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Feb 18 00:34:20 crc kubenswrapper[4791]: E0218 00:34:20.634715 4791 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.113:6443: connect: connection refused" logger="UnhandledError" Feb 18 00:34:20 crc kubenswrapper[4791]: I0218 00:34:20.638949 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:20 crc kubenswrapper[4791]: I0218 00:34:20.643150 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:20 crc kubenswrapper[4791]: I0218 00:34:20.643210 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:20 crc kubenswrapper[4791]: I0218 00:34:20.643220 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:20 crc kubenswrapper[4791]: I0218 00:34:20.643245 4791 kubelet_node_status.go:76] "Attempting to register node" node="crc" Feb 18 00:34:20 crc kubenswrapper[4791]: E0218 00:34:20.643537 4791 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.113:6443: connect: connection refused" node="crc" Feb 18 00:34:20 crc kubenswrapper[4791]: I0218 00:34:20.911816 4791 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Feb 18 00:34:20 crc kubenswrapper[4791]: E0218 00:34:20.913409 4791 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post 
\"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.113:6443: connect: connection refused" logger="UnhandledError" Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.002069 4791 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.009350 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-14 19:37:17.838778129 +0000 UTC Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.080196 4791 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="5fe785fada613c75ca071e74b230919a697ca990f310f4c07afbfe8fb2e6d4d9" exitCode=0 Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.080291 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"5fe785fada613c75ca071e74b230919a697ca990f310f4c07afbfe8fb2e6d4d9"} Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.080391 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.081494 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.081529 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.081541 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.083365 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.083352 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb"} Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.083406 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe"} Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.083423 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05"} Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.083435 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9"} Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.083972 4791 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.083987 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.083995 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.084912 4791 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78" exitCode=0 Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.084956 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78"} Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.085082 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.086319 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.086385 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.086410 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.086856 4791 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968" exitCode=0 Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.086916 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968"} Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.086947 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.087860 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.087880 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.087889 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.088313 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.088632 4791 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="9f7bf77d75850f01ea3e9d8ee0ca3e949c3f95d621437ec1a893cd1dc5c58916" exitCode=0 Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.088658 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" 
event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"9f7bf77d75850f01ea3e9d8ee0ca3e949c3f95d621437ec1a893cd1dc5c58916"} Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.088708 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.089269 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.089291 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.089299 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.089339 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.089378 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.089399 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:21 crc kubenswrapper[4791]: I0218 00:34:21.995823 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.001932 4791 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.010178 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-15 10:13:15.290182811 +0000 UTC Feb 18 00:34:22 crc kubenswrapper[4791]: E0218 00:34:22.016179 4791 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" interval="3.2s" Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.094241 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"b63c6e04acaa4e43b2ae978cce80ae00e6586479ab4323ae2c81e673b5e535c6"} Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.094286 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f"} Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.094300 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02"} Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.094308 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:22 crc 
kubenswrapper[4791]: I0218 00:34:22.094314 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70"} Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.094418 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16"} Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.095107 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.095137 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.095148 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.095628 4791 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2" exitCode=0 Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.095673 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2"} Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.095796 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.096499 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.096531 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.096542 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.097084 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"e5a695212b2ac3fbc5ae5b16df68a833130025c3c7aeb7e1ea2e02f36ee3b96c"} Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.097115 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.097769 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.097805 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.097818 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.099043 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" 
event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"b98d760307fb74389d3eb9487cb79e6b24b2e8a4483c5c7d1f85073375bfce93"} Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.099064 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.099071 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.099075 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"22a4a7ad6c6d5a8547c420da7822e0efd3617c4c3d44fb6aca96e019564d0e3b"} Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.099092 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"c7227e5ab7b9fa0410752947b534340d7033f38af5ea7179f07078ec9e561ac2"} Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.099806 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.099832 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.099841 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.099893 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.099911 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.099921 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:22 crc kubenswrapper[4791]: W0218 00:34:22.212656 4791 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Feb 18 00:34:22 crc kubenswrapper[4791]: E0218 00:34:22.212736 4791 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.113:6443: connect: connection refused" logger="UnhandledError" Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.244449 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.245463 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.245493 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.245500 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.245521 4791 kubelet_node_status.go:76] "Attempting to register node" node="crc" Feb 18 00:34:22 crc kubenswrapper[4791]: E0218 00:34:22.245861 4791 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.113:6443: connect: connection refused" node="crc" Feb 18 00:34:22 crc kubenswrapper[4791]: W0218 00:34:22.254307 4791 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Feb 18 00:34:22 crc kubenswrapper[4791]: E0218 00:34:22.254354 4791 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.113:6443: connect: connection refused" logger="UnhandledError" Feb 18 00:34:22 crc kubenswrapper[4791]: I0218 00:34:22.255355 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 18 00:34:22 crc kubenswrapper[4791]: W0218 00:34:22.423639 4791 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.113:6443: connect: connection refused Feb 18 00:34:22 crc kubenswrapper[4791]: E0218 00:34:22.423707 4791 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.113:6443: connect: connection refused" logger="UnhandledError" Feb 18 00:34:23 crc kubenswrapper[4791]: I0218 00:34:23.010695 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-24 17:16:49.188923521 +0000 UTC Feb 18 00:34:23 crc kubenswrapper[4791]: I0218 00:34:23.102852 4791 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc" exitCode=0 Feb 18 00:34:23 crc kubenswrapper[4791]: I0218 00:34:23.102914 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc"} Feb 18 00:34:23 crc kubenswrapper[4791]: I0218 00:34:23.103296 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:23 crc kubenswrapper[4791]: I0218 00:34:23.103414 4791 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Feb 18 00:34:23 crc kubenswrapper[4791]: I0218 00:34:23.103448 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:23 crc kubenswrapper[4791]: I0218 00:34:23.103464 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:23 
crc kubenswrapper[4791]: I0218 00:34:23.105889 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:23 crc kubenswrapper[4791]: I0218 00:34:23.105946 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:23 crc kubenswrapper[4791]: I0218 00:34:23.106004 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:34:23 crc kubenswrapper[4791]: I0218 00:34:23.106229 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:23 crc kubenswrapper[4791]: I0218 00:34:23.106282 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:23 crc kubenswrapper[4791]: I0218 00:34:23.106302 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:23 crc kubenswrapper[4791]: I0218 00:34:23.106338 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:23 crc kubenswrapper[4791]: I0218 00:34:23.106367 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:23 crc kubenswrapper[4791]: I0218 00:34:23.106377 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:23 crc kubenswrapper[4791]: I0218 00:34:23.106414 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:23 crc kubenswrapper[4791]: I0218 00:34:23.106427 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:23 crc kubenswrapper[4791]: I0218 00:34:23.106437 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:23 crc kubenswrapper[4791]: I0218 00:34:23.107201 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:23 crc kubenswrapper[4791]: I0218 00:34:23.107295 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:23 crc kubenswrapper[4791]: I0218 00:34:23.107336 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:23 crc kubenswrapper[4791]: I0218 00:34:23.107485 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:23 crc kubenswrapper[4791]: I0218 00:34:23.107562 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:23 crc kubenswrapper[4791]: I0218 00:34:23.107406 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:23 crc kubenswrapper[4791]: I0218 00:34:23.321108 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 18 00:34:23 crc kubenswrapper[4791]: I0218 00:34:23.327144 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 18 00:34:24 crc kubenswrapper[4791]: I0218 00:34:24.011659 
4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 07:47:19.765903543 +0000 UTC Feb 18 00:34:24 crc kubenswrapper[4791]: I0218 00:34:24.109243 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae"} Feb 18 00:34:24 crc kubenswrapper[4791]: I0218 00:34:24.109312 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62"} Feb 18 00:34:24 crc kubenswrapper[4791]: I0218 00:34:24.109337 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c"} Feb 18 00:34:24 crc kubenswrapper[4791]: I0218 00:34:24.109367 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28"} Feb 18 00:34:24 crc kubenswrapper[4791]: I0218 00:34:24.109350 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:24 crc kubenswrapper[4791]: I0218 00:34:24.109319 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:24 crc kubenswrapper[4791]: I0218 00:34:24.110411 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:24 crc kubenswrapper[4791]: I0218 00:34:24.110452 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:24 crc kubenswrapper[4791]: I0218 00:34:24.110465 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:24 crc kubenswrapper[4791]: I0218 00:34:24.110762 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:24 crc kubenswrapper[4791]: I0218 00:34:24.110792 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:24 crc kubenswrapper[4791]: I0218 00:34:24.110808 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:24 crc kubenswrapper[4791]: I0218 00:34:24.375691 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 18 00:34:24 crc kubenswrapper[4791]: I0218 00:34:24.714206 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:34:25 crc kubenswrapper[4791]: I0218 00:34:25.011859 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-06 05:40:07.342057509 +0000 UTC Feb 18 00:34:25 crc kubenswrapper[4791]: I0218 00:34:25.117084 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" 
event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"ca2770a6efebbd823d23803d7ab2e95dc0973add4cd4773852d8c0865cb2ff81"} Feb 18 00:34:25 crc kubenswrapper[4791]: I0218 00:34:25.117240 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:25 crc kubenswrapper[4791]: I0218 00:34:25.117244 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:25 crc kubenswrapper[4791]: I0218 00:34:25.117273 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:25 crc kubenswrapper[4791]: I0218 00:34:25.118482 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:25 crc kubenswrapper[4791]: I0218 00:34:25.118534 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:25 crc kubenswrapper[4791]: I0218 00:34:25.118551 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:25 crc kubenswrapper[4791]: I0218 00:34:25.118850 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:25 crc kubenswrapper[4791]: I0218 00:34:25.118906 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:25 crc kubenswrapper[4791]: I0218 00:34:25.118926 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:25 crc kubenswrapper[4791]: I0218 00:34:25.119717 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:25 crc kubenswrapper[4791]: I0218 00:34:25.119743 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:25 crc kubenswrapper[4791]: I0218 00:34:25.119752 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:25 crc kubenswrapper[4791]: I0218 00:34:25.225643 4791 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Feb 18 00:34:25 crc kubenswrapper[4791]: I0218 00:34:25.446587 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:25 crc kubenswrapper[4791]: I0218 00:34:25.448396 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:25 crc kubenswrapper[4791]: I0218 00:34:25.448434 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:25 crc kubenswrapper[4791]: I0218 00:34:25.448448 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:25 crc kubenswrapper[4791]: I0218 00:34:25.448477 4791 kubelet_node_status.go:76] "Attempting to register node" node="crc" Feb 18 00:34:26 crc kubenswrapper[4791]: I0218 00:34:26.012047 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 17:01:01.698254261 +0000 UTC Feb 18 00:34:26 crc kubenswrapper[4791]: I0218 00:34:26.119441 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume 
controller attach/detach" Feb 18 00:34:26 crc kubenswrapper[4791]: I0218 00:34:26.119508 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:26 crc kubenswrapper[4791]: I0218 00:34:26.121024 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:26 crc kubenswrapper[4791]: I0218 00:34:26.121042 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:26 crc kubenswrapper[4791]: I0218 00:34:26.121064 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:26 crc kubenswrapper[4791]: I0218 00:34:26.121075 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:26 crc kubenswrapper[4791]: I0218 00:34:26.121077 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:26 crc kubenswrapper[4791]: I0218 00:34:26.121095 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:26 crc kubenswrapper[4791]: I0218 00:34:26.519244 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:34:26 crc kubenswrapper[4791]: I0218 00:34:26.519473 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:26 crc kubenswrapper[4791]: I0218 00:34:26.520937 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:26 crc kubenswrapper[4791]: I0218 00:34:26.520966 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:26 crc kubenswrapper[4791]: I0218 00:34:26.520981 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:26 crc kubenswrapper[4791]: I0218 00:34:26.735079 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Feb 18 00:34:27 crc kubenswrapper[4791]: I0218 00:34:27.012885 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 00:19:16.361047626 +0000 UTC Feb 18 00:34:27 crc kubenswrapper[4791]: I0218 00:34:27.122727 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:27 crc kubenswrapper[4791]: I0218 00:34:27.125000 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:27 crc kubenswrapper[4791]: I0218 00:34:27.125095 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:27 crc kubenswrapper[4791]: I0218 00:34:27.125243 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:27 crc kubenswrapper[4791]: I0218 00:34:27.376133 4791 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" 
start-of-body= Feb 18 00:34:27 crc kubenswrapper[4791]: I0218 00:34:27.376341 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Feb 18 00:34:28 crc kubenswrapper[4791]: I0218 00:34:28.013257 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-22 06:23:03.835286098 +0000 UTC Feb 18 00:34:28 crc kubenswrapper[4791]: I0218 00:34:28.467013 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 18 00:34:28 crc kubenswrapper[4791]: I0218 00:34:28.467264 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:28 crc kubenswrapper[4791]: I0218 00:34:28.468705 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:28 crc kubenswrapper[4791]: I0218 00:34:28.468775 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:28 crc kubenswrapper[4791]: I0218 00:34:28.468799 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:29 crc kubenswrapper[4791]: I0218 00:34:29.013734 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-20 18:43:00.178202646 +0000 UTC Feb 18 00:34:29 crc kubenswrapper[4791]: E0218 00:34:29.139459 4791 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Feb 18 00:34:30 crc kubenswrapper[4791]: I0218 00:34:30.014832 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-09 13:38:47.096664269 +0000 UTC Feb 18 00:34:31 crc kubenswrapper[4791]: I0218 00:34:31.014988 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 08:10:01.027338987 +0000 UTC Feb 18 00:34:31 crc kubenswrapper[4791]: I0218 00:34:31.562666 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Feb 18 00:34:31 crc kubenswrapper[4791]: I0218 00:34:31.562923 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:31 crc kubenswrapper[4791]: I0218 00:34:31.564441 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:31 crc kubenswrapper[4791]: I0218 00:34:31.564493 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:31 crc kubenswrapper[4791]: I0218 00:34:31.564505 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:32 crc kubenswrapper[4791]: I0218 00:34:32.015494 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-15 
16:35:49.085952344 +0000 UTC Feb 18 00:34:32 crc kubenswrapper[4791]: I0218 00:34:32.264145 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 18 00:34:32 crc kubenswrapper[4791]: I0218 00:34:32.264352 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:32 crc kubenswrapper[4791]: I0218 00:34:32.265742 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:32 crc kubenswrapper[4791]: I0218 00:34:32.265786 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:32 crc kubenswrapper[4791]: I0218 00:34:32.265798 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:32 crc kubenswrapper[4791]: W0218 00:34:32.666767 4791 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout Feb 18 00:34:32 crc kubenswrapper[4791]: I0218 00:34:32.666890 4791 trace.go:236] Trace[1164740074]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (18-Feb-2026 00:34:22.664) (total time: 10001ms): Feb 18 00:34:32 crc kubenswrapper[4791]: Trace[1164740074]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (00:34:32.666) Feb 18 00:34:32 crc kubenswrapper[4791]: Trace[1164740074]: [10.001936991s] [10.001936991s] END Feb 18 00:34:32 crc kubenswrapper[4791]: E0218 00:34:32.666952 4791 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Feb 18 00:34:33 crc kubenswrapper[4791]: I0218 00:34:33.003050 4791 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Feb 18 00:34:33 crc kubenswrapper[4791]: I0218 00:34:33.016552 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-02 09:58:11.326991419 +0000 UTC Feb 18 00:34:33 crc kubenswrapper[4791]: I0218 00:34:33.138246 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Feb 18 00:34:33 crc kubenswrapper[4791]: I0218 00:34:33.140332 4791 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="b63c6e04acaa4e43b2ae978cce80ae00e6586479ab4323ae2c81e673b5e535c6" exitCode=255 Feb 18 00:34:33 crc kubenswrapper[4791]: I0218 00:34:33.140374 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"b63c6e04acaa4e43b2ae978cce80ae00e6586479ab4323ae2c81e673b5e535c6"} Feb 18 00:34:33 crc kubenswrapper[4791]: I0218 
00:34:33.140503 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:33 crc kubenswrapper[4791]: I0218 00:34:33.141190 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:33 crc kubenswrapper[4791]: I0218 00:34:33.141223 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:33 crc kubenswrapper[4791]: I0218 00:34:33.141233 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:33 crc kubenswrapper[4791]: I0218 00:34:33.141643 4791 scope.go:117] "RemoveContainer" containerID="b63c6e04acaa4e43b2ae978cce80ae00e6586479ab4323ae2c81e673b5e535c6" Feb 18 00:34:33 crc kubenswrapper[4791]: I0218 00:34:33.594054 4791 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Feb 18 00:34:33 crc kubenswrapper[4791]: I0218 00:34:33.594113 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Feb 18 00:34:33 crc kubenswrapper[4791]: I0218 00:34:33.609276 4791 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Feb 18 00:34:33 crc kubenswrapper[4791]: I0218 00:34:33.609384 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Feb 18 00:34:34 crc kubenswrapper[4791]: I0218 00:34:34.016949 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 20:09:19.858922983 +0000 UTC Feb 18 00:34:34 crc kubenswrapper[4791]: I0218 00:34:34.144238 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Feb 18 00:34:34 crc kubenswrapper[4791]: I0218 00:34:34.145595 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8"} Feb 18 00:34:34 crc kubenswrapper[4791]: I0218 00:34:34.145732 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:34 crc kubenswrapper[4791]: I0218 00:34:34.146456 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:34 crc kubenswrapper[4791]: I0218 
00:34:34.146480 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:34 crc kubenswrapper[4791]: I0218 00:34:34.146487 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:34 crc kubenswrapper[4791]: I0218 00:34:34.724439 4791 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Feb 18 00:34:34 crc kubenswrapper[4791]: [+]log ok Feb 18 00:34:34 crc kubenswrapper[4791]: [+]etcd ok Feb 18 00:34:34 crc kubenswrapper[4791]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Feb 18 00:34:34 crc kubenswrapper[4791]: [+]poststarthook/openshift.io-api-request-count-filter ok Feb 18 00:34:34 crc kubenswrapper[4791]: [+]poststarthook/openshift.io-startkubeinformers ok Feb 18 00:34:34 crc kubenswrapper[4791]: [+]poststarthook/openshift.io-openshift-apiserver-reachable ok Feb 18 00:34:34 crc kubenswrapper[4791]: [+]poststarthook/openshift.io-oauth-apiserver-reachable ok Feb 18 00:34:34 crc kubenswrapper[4791]: [+]poststarthook/start-apiserver-admission-initializer ok Feb 18 00:34:34 crc kubenswrapper[4791]: [+]poststarthook/generic-apiserver-start-informers ok Feb 18 00:34:34 crc kubenswrapper[4791]: [+]poststarthook/priority-and-fairness-config-consumer ok Feb 18 00:34:34 crc kubenswrapper[4791]: [+]poststarthook/priority-and-fairness-filter ok Feb 18 00:34:34 crc kubenswrapper[4791]: [+]poststarthook/storage-object-count-tracker-hook ok Feb 18 00:34:34 crc kubenswrapper[4791]: [+]poststarthook/start-apiextensions-informers ok Feb 18 00:34:34 crc kubenswrapper[4791]: [+]poststarthook/start-apiextensions-controllers ok Feb 18 00:34:34 crc kubenswrapper[4791]: [+]poststarthook/crd-informer-synced ok Feb 18 00:34:34 crc kubenswrapper[4791]: [+]poststarthook/start-system-namespaces-controller ok Feb 18 00:34:34 crc kubenswrapper[4791]: [+]poststarthook/start-cluster-authentication-info-controller ok Feb 18 00:34:34 crc kubenswrapper[4791]: [+]poststarthook/start-kube-apiserver-identity-lease-controller ok Feb 18 00:34:34 crc kubenswrapper[4791]: [+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok Feb 18 00:34:34 crc kubenswrapper[4791]: [+]poststarthook/start-legacy-token-tracking-controller ok Feb 18 00:34:34 crc kubenswrapper[4791]: [+]poststarthook/start-service-ip-repair-controllers ok Feb 18 00:34:34 crc kubenswrapper[4791]: [-]poststarthook/rbac/bootstrap-roles failed: reason withheld Feb 18 00:34:34 crc kubenswrapper[4791]: [+]poststarthook/scheduling/bootstrap-system-priority-classes ok Feb 18 00:34:34 crc kubenswrapper[4791]: [+]poststarthook/priority-and-fairness-config-producer ok Feb 18 00:34:34 crc kubenswrapper[4791]: [+]poststarthook/bootstrap-controller ok Feb 18 00:34:34 crc kubenswrapper[4791]: [+]poststarthook/aggregator-reload-proxy-client-cert ok Feb 18 00:34:34 crc kubenswrapper[4791]: [+]poststarthook/start-kube-aggregator-informers ok Feb 18 00:34:34 crc kubenswrapper[4791]: [+]poststarthook/apiservice-status-local-available-controller ok Feb 18 00:34:34 crc kubenswrapper[4791]: [+]poststarthook/apiservice-status-remote-available-controller ok Feb 18 00:34:34 crc kubenswrapper[4791]: [+]poststarthook/apiservice-registration-controller ok Feb 18 00:34:34 crc kubenswrapper[4791]: [+]poststarthook/apiservice-wait-for-first-sync ok Feb 18 00:34:34 crc 
kubenswrapper[4791]: [+]poststarthook/apiservice-discovery-controller ok Feb 18 00:34:34 crc kubenswrapper[4791]: [+]poststarthook/kube-apiserver-autoregistration ok Feb 18 00:34:34 crc kubenswrapper[4791]: [+]autoregister-completion ok Feb 18 00:34:34 crc kubenswrapper[4791]: [+]poststarthook/apiservice-openapi-controller ok Feb 18 00:34:34 crc kubenswrapper[4791]: [+]poststarthook/apiservice-openapiv3-controller ok Feb 18 00:34:34 crc kubenswrapper[4791]: livez check failed Feb 18 00:34:34 crc kubenswrapper[4791]: I0218 00:34:34.724507 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 18 00:34:35 crc kubenswrapper[4791]: I0218 00:34:35.017836 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-09 03:22:47.95063381 +0000 UTC Feb 18 00:34:36 crc kubenswrapper[4791]: I0218 00:34:36.018285 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 15:20:34.050101778 +0000 UTC Feb 18 00:34:37 crc kubenswrapper[4791]: I0218 00:34:37.019460 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-23 13:06:21.326598904 +0000 UTC Feb 18 00:34:37 crc kubenswrapper[4791]: I0218 00:34:37.376797 4791 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Feb 18 00:34:37 crc kubenswrapper[4791]: I0218 00:34:37.376958 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Feb 18 00:34:38 crc kubenswrapper[4791]: I0218 00:34:38.019639 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 03:22:44.754338047 +0000 UTC Feb 18 00:34:38 crc kubenswrapper[4791]: I0218 00:34:38.333109 4791 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Feb 18 00:34:38 crc kubenswrapper[4791]: E0218 00:34:38.587796 4791 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s" Feb 18 00:34:38 crc kubenswrapper[4791]: I0218 00:34:38.590342 4791 trace.go:236] Trace[1277695126]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (18-Feb-2026 00:34:28.347) (total time: 10243ms): Feb 18 00:34:38 crc kubenswrapper[4791]: Trace[1277695126]: ---"Objects listed" error: 10243ms (00:34:38.590) Feb 18 00:34:38 crc kubenswrapper[4791]: Trace[1277695126]: [10.243210776s] [10.243210776s] END Feb 18 00:34:38 crc 
kubenswrapper[4791]: I0218 00:34:38.590397 4791 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Feb 18 00:34:38 crc kubenswrapper[4791]: I0218 00:34:38.595069 4791 trace.go:236] Trace[1444935150]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (18-Feb-2026 00:34:27.277) (total time: 11317ms): Feb 18 00:34:38 crc kubenswrapper[4791]: Trace[1444935150]: ---"Objects listed" error: 11317ms (00:34:38.595) Feb 18 00:34:38 crc kubenswrapper[4791]: Trace[1444935150]: [11.317662525s] [11.317662525s] END Feb 18 00:34:38 crc kubenswrapper[4791]: I0218 00:34:38.595113 4791 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Feb 18 00:34:38 crc kubenswrapper[4791]: E0218 00:34:38.605907 4791 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Feb 18 00:34:38 crc kubenswrapper[4791]: I0218 00:34:38.605955 4791 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Feb 18 00:34:38 crc kubenswrapper[4791]: I0218 00:34:38.607994 4791 trace.go:236] Trace[756167010]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (18-Feb-2026 00:34:27.942) (total time: 10665ms): Feb 18 00:34:38 crc kubenswrapper[4791]: Trace[756167010]: ---"Objects listed" error: 10665ms (00:34:38.607) Feb 18 00:34:38 crc kubenswrapper[4791]: Trace[756167010]: [10.665681219s] [10.665681219s] END Feb 18 00:34:38 crc kubenswrapper[4791]: I0218 00:34:38.608020 4791 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Feb 18 00:34:38 crc kubenswrapper[4791]: I0218 00:34:38.612353 4791 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Feb 18 00:34:38 crc kubenswrapper[4791]: I0218 00:34:38.994634 4791 apiserver.go:52] "Watching apiserver" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.008124 4791 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.008466 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h"] Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.008945 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.009122 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:34:39 crc kubenswrapper[4791]: E0218 00:34:39.009225 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.009299 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:34:39 crc kubenswrapper[4791]: E0218 00:34:39.009334 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.009357 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.009424 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.009559 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 18 00:34:39 crc kubenswrapper[4791]: E0218 00:34:39.009574 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.010901 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.011960 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.011969 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.012011 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.012418 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.012484 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.012570 4791 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.013213 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.013357 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.014935 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.019774 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 15:13:30.393552658 +0000 UTC Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.041957 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.051896 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.061873 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.076114 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.087220 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.099938 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.109435 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.109490 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.109514 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.109544 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.109561 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.109581 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.109596 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.109626 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.109644 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.109664 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.109696 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.109716 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.109732 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.109747 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.109767 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.109787 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.109810 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.109829 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.109861 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.109884 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.109900 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.109901 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.109931 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.109948 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.109964 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.109981 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110031 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110040 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110055 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110092 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110113 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110177 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110202 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110211 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110228 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110266 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110298 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110334 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110362 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110395 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110443 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110460 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110475 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110477 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110513 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110534 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110553 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110568 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110587 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110603 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110619 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110634 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110651 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: 
\"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110669 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110685 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110701 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110723 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110743 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110759 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110775 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110791 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110810 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110825 4791 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110840 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110857 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110873 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110890 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110906 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110921 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110941 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110960 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110975 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.110990 4791 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111005 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111019 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111035 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111051 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111065 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111081 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111095 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111113 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111127 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111143 
4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111175 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111191 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111209 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111229 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111235 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111246 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111261 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111278 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111299 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111317 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111332 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111348 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111365 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111379 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111397 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod 
\"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111412 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111456 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111472 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111489 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111506 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111525 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111541 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111557 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111573 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111588 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: 
\"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111604 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111620 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111637 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111653 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111670 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111685 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111701 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111716 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111731 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111747 4791 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111766 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111781 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111796 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111814 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111829 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111845 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111861 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111877 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111894 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Feb 18 00:34:39 crc 
kubenswrapper[4791]: I0218 00:34:39.111909 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111942 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111960 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111978 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111994 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112010 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112028 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112045 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112065 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112080 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 
00:34:39.112096 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112111 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112126 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112143 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112178 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112196 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112213 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112228 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112245 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112261 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: 
\"6ea678ab-3438-413e-bfe3-290ae7725660\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112278 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112295 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112310 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112332 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112348 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112364 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112381 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112398 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112414 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112431 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod 
\"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112448 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112464 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112481 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112497 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112512 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112530 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112548 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112566 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112583 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112603 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112622 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112640 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112657 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112673 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112690 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112779 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112799 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112817 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112838 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Feb 18 00:34:39 crc 
kubenswrapper[4791]: I0218 00:34:39.112856 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112872 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112890 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112934 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.124390 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.126350 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.130397 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111377 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111832 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.111959 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112305 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112452 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112650 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.112662 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.113147 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.113383 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.113450 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.113473 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.113601 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.113666 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.113735 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.113047 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.113892 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.113955 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.114090 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.114135 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.114473 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.131762 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.114558 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.114589 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.114804 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.114964 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.114993 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.115243 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.115335 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.115351 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.115552 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.115652 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.115700 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.115913 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.116002 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.116386 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.116167 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.116442 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.116621 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.116630 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.116804 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.116829 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.116889 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.117275 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.119370 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.119527 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.119572 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.120005 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.120012 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.120066 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.120113 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.120583 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.120716 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.120732 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.120750 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.120718 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.121382 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.121572 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.121692 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.122176 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.122674 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.122688 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.122928 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.122978 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.123185 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.123310 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.123391 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.123582 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). 
InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.123747 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.123758 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.123812 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.123837 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.123986 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.123320 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.124218 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.124301 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.124412 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.125857 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.125911 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.125928 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.126298 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.126725 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.126939 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.127262 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.127324 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.127716 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.127990 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.128253 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.128404 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.128985 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.129296 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.130324 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.130470 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.130612 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.130612 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.130635 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.131136 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.131188 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.131531 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.132464 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.132483 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.132640 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.133393 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.132255 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.133394 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.133435 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.133417 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.133438 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.133470 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.133558 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.133568 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.133594 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.133597 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.133622 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.133648 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.133670 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.133691 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.133714 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.133738 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.133758 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.133779 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.133867 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.133895 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.134040 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.134124 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.134167 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.134679 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.134705 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.134756 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.134770 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.134803 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.134828 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.134983 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.135055 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.135086 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.135102 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.135102 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.134852 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.135417 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: E0218 00:34:39.135659 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:34:39.635590145 +0000 UTC m=+21.203603315 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.135723 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.135731 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.135766 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.135775 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.135794 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.135817 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.135509 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.135897 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.135970 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.136074 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.136456 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.136505 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.136586 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.136957 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.137283 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.137506 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.137549 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.137683 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.137669 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.137734 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: E0218 00:34:39.137782 4791 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.137804 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.137862 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.137956 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.137970 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.136689 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.137703 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.137707 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: E0218 00:34:39.138470 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-18 00:34:39.637916686 +0000 UTC m=+21.205929856 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.138037 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.138606 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.139008 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.139055 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.139130 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.139211 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.139141 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.139288 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.139423 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.139695 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.139806 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.139963 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.140043 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.140078 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.140384 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Feb 18 00:34:39 crc kubenswrapper[4791]: E0218 00:34:39.140470 4791 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.140472 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.140582 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Feb 18 00:34:39 crc kubenswrapper[4791]: E0218 00:34:39.140845 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-18 00:34:39.640583357 +0000 UTC m=+21.208596527 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.140897 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.140939 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.140995 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.141046 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.141087 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.141063 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.141133 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.141212 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.141240 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.141245 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.141256 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.141898 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.141965 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.142228 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). 
InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.142461 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.142668 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.142752 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.142892 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.143125 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.143323 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.145807 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.146148 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.146356 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.146378 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.144080 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.148751 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.148774 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.148795 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.149000 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.143397 4791 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.151237 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.154259 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.154490 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.155108 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.155140 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.156502 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157128 4791 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157174 4791 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157189 4791 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157200 4791 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157194 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157211 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157223 4791 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157234 4791 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157245 4791 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157254 4791 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157264 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157275 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157285 4791 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157295 4791 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157306 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157317 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157327 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157337 4791 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath 
\"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157346 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157357 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157367 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157379 4791 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157389 4791 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157398 4791 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157410 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157422 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157433 4791 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157444 4791 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157453 4791 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157464 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157474 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc 
kubenswrapper[4791]: I0218 00:34:39.157484 4791 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157496 4791 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157507 4791 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157517 4791 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157528 4791 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157537 4791 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157548 4791 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157558 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157570 4791 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157580 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157589 4791 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157600 4791 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157611 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157622 
4791 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157631 4791 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157640 4791 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157650 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157660 4791 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157670 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157679 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157689 4791 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157700 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157709 4791 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157718 4791 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157728 4791 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157739 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc 
kubenswrapper[4791]: I0218 00:34:39.157748 4791 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157758 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157770 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157779 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157789 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157797 4791 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157807 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157819 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157835 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157845 4791 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157858 4791 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157867 4791 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157877 4791 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157886 4791 
reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157895 4791 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157904 4791 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157914 4791 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157923 4791 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157932 4791 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157914 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.158231 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 18 00:34:39 crc kubenswrapper[4791]: E0218 00:34:39.158450 4791 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 18 00:34:39 crc kubenswrapper[4791]: E0218 00:34:39.158465 4791 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 18 00:34:39 crc kubenswrapper[4791]: E0218 00:34:39.158478 4791 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 18 00:34:39 crc kubenswrapper[4791]: E0218 00:34:39.158536 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-02-18 00:34:39.658519024 +0000 UTC m=+21.226532194 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 18 00:34:39 crc kubenswrapper[4791]: E0218 00:34:39.158749 4791 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 18 00:34:39 crc kubenswrapper[4791]: E0218 00:34:39.158763 4791 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 18 00:34:39 crc kubenswrapper[4791]: E0218 00:34:39.158771 4791 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 18 00:34:39 crc kubenswrapper[4791]: E0218 00:34:39.158799 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-02-18 00:34:39.658791453 +0000 UTC m=+21.226804623 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.157942 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.158911 4791 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.158922 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.158934 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.158944 4791 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.158954 4791 reconciler_common.go:293] "Volume detached for volume 
\"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.158964 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.158974 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.158986 4791 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.158995 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159005 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159015 4791 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159047 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159060 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159072 4791 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159082 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159092 4791 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159101 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159111 4791 reconciler_common.go:293] "Volume 
detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159120 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159129 4791 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159139 4791 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159148 4791 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159172 4791 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159183 4791 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159195 4791 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159205 4791 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159214 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159224 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159236 4791 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159249 4791 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 
00:34:39.159259 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159268 4791 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159277 4791 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159286 4791 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159295 4791 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159303 4791 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159312 4791 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159322 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159333 4791 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159343 4791 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159352 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159361 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159370 4791 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 
00:34:39.159380 4791 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159389 4791 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159398 4791 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159407 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159416 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159432 4791 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159441 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159450 4791 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159493 4791 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159501 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159513 4791 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159522 4791 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159531 4791 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: 
I0218 00:34:39.159539 4791 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159550 4791 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159559 4791 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159568 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159577 4791 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159587 4791 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159596 4791 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159606 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159616 4791 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159625 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159636 4791 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159647 4791 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159656 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 
crc kubenswrapper[4791]: I0218 00:34:39.159665 4791 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159674 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159683 4791 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159704 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159714 4791 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159723 4791 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159732 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159754 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159764 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159772 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159782 4791 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159793 4791 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.159801 4791 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 
00:34:39.159810 4791 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.164307 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.164596 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.171327 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.171290 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.171735 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.178001 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.178070 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.178710 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.178888 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.182054 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.182226 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.190530 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.191038 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.198652 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.206831 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.217211 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.229101 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261109 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261152 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261225 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261235 4791 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261245 4791 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261254 4791 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261264 4791 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261274 4791 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261283 4791 reconciler_common.go:293] "Volume detached for 
volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261292 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261302 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261311 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261321 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261329 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261340 4791 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261348 4791 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261356 4791 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261366 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261374 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261383 4791 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261371 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Feb 18 00:34:39 
crc kubenswrapper[4791]: I0218 00:34:39.261431 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261467 4791 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261543 4791 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261586 4791 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261609 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261630 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261649 4791 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261669 4791 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261690 4791 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261709 4791 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261726 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261743 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261760 4791 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: 
\"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261778 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261797 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261815 4791 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261835 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.261866 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.323207 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.333667 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.339316 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Feb 18 00:34:39 crc kubenswrapper[4791]: W0218 00:34:39.346220 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-df3a0d28e9e3291a43b0300f571989d7d0cdf914a9c8f5a5b44d48ed7d92e61c WatchSource:0}: Error finding container df3a0d28e9e3291a43b0300f571989d7d0cdf914a9c8f5a5b44d48ed7d92e61c: Status 404 returned error can't find the container with id df3a0d28e9e3291a43b0300f571989d7d0cdf914a9c8f5a5b44d48ed7d92e61c Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.665659 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:34:39 crc kubenswrapper[4791]: E0218 00:34:39.665936 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:34:40.665914432 +0000 UTC m=+22.233927622 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.666346 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:34:39 crc kubenswrapper[4791]: E0218 00:34:39.666500 4791 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.666495 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:34:39 crc kubenswrapper[4791]: E0218 00:34:39.666584 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-18 00:34:40.666572382 +0000 UTC m=+22.234585562 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 18 00:34:39 crc kubenswrapper[4791]: E0218 00:34:39.666619 4791 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.666627 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:34:39 crc kubenswrapper[4791]: E0218 00:34:39.666713 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-18 00:34:40.666695726 +0000 UTC m=+22.234708896 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.666739 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:34:39 crc kubenswrapper[4791]: E0218 00:34:39.666834 4791 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 18 00:34:39 crc kubenswrapper[4791]: E0218 00:34:39.666849 4791 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 18 00:34:39 crc kubenswrapper[4791]: E0218 00:34:39.666862 4791 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 18 00:34:39 crc kubenswrapper[4791]: E0218 00:34:39.666902 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-02-18 00:34:40.666892572 +0000 UTC m=+22.234905752 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 18 00:34:39 crc kubenswrapper[4791]: E0218 00:34:39.667080 4791 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 18 00:34:39 crc kubenswrapper[4791]: E0218 00:34:39.667185 4791 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 18 00:34:39 crc kubenswrapper[4791]: E0218 00:34:39.667212 4791 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 18 00:34:39 crc kubenswrapper[4791]: E0218 00:34:39.667476 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. 
No retries permitted until 2026-02-18 00:34:40.667352046 +0000 UTC m=+22.235365256 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.721016 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.721511 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.728640 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.732488 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.741418 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.744066 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.753798 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.771182 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.781976 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.796510 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.811530 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.822170 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.838519 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b63c6e04acaa4e43b2ae978cce80ae00e6586479ab4323ae2c81e673b5e535c6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:33Z\\\",\\\"message\\\":\\\"W0218 00:34:22.108325 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0218 
00:34:22.108571 1 crypto.go:601] Generating new CA for check-endpoints-signer@1771374862 cert, and key in /tmp/serving-cert-623947780/serving-signer.crt, /tmp/serving-cert-623947780/serving-signer.key\\\\nI0218 00:34:22.523502 1 observer_polling.go:159] Starting file observer\\\\nW0218 00:34:22.525400 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0218 00:34:22.525522 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:22.527417 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-623947780/tls.crt::/tmp/serving-cert-623947780/tls.key\\\\\\\"\\\\nF0218 00:34:33.085419 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.854472 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.867806 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.879702 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 18 00:34:39 crc kubenswrapper[4791]: I0218 00:34:39.894818 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 18 00:34:40 crc kubenswrapper[4791]: I0218 00:34:40.020441 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-26 05:48:03.291066397 +0000 UTC Feb 18 00:34:40 crc kubenswrapper[4791]: I0218 00:34:40.170723 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Feb 18 00:34:40 crc kubenswrapper[4791]: I0218 00:34:40.171432 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Feb 18 00:34:40 crc kubenswrapper[4791]: I0218 00:34:40.172817 4791 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8" exitCode=255 Feb 18 00:34:40 crc kubenswrapper[4791]: I0218 00:34:40.172849 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8"} Feb 18 00:34:40 crc kubenswrapper[4791]: I0218 00:34:40.173018 4791 scope.go:117] "RemoveContainer" containerID="b63c6e04acaa4e43b2ae978cce80ae00e6586479ab4323ae2c81e673b5e535c6" Feb 18 00:34:40 crc kubenswrapper[4791]: I0218 00:34:40.175924 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348"} Feb 18 00:34:40 crc kubenswrapper[4791]: I0218 00:34:40.175970 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8"} Feb 18 00:34:40 crc kubenswrapper[4791]: I0218 00:34:40.175985 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"bf743b7dc7d1e08a6388ccff5e977475c9ef87161bb3ca000add2ba748c555f3"} Feb 18 00:34:40 crc 
kubenswrapper[4791]: I0218 00:34:40.177500 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"5b2c8293cc3cd187b5a1013037c0ea5807675cf692a0b4be91997c1299e93dff"} Feb 18 00:34:40 crc kubenswrapper[4791]: I0218 00:34:40.178577 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502"} Feb 18 00:34:40 crc kubenswrapper[4791]: I0218 00:34:40.178599 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"df3a0d28e9e3291a43b0300f571989d7d0cdf914a9c8f5a5b44d48ed7d92e61c"} Feb 18 00:34:40 crc kubenswrapper[4791]: E0218 00:34:40.181786 4791 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-apiserver-crc\" already exists" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:34:40 crc kubenswrapper[4791]: I0218 00:34:40.182050 4791 scope.go:117] "RemoveContainer" containerID="05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8" Feb 18 00:34:40 crc kubenswrapper[4791]: E0218 00:34:40.182386 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Feb 18 00:34:40 crc kubenswrapper[4791]: I0218 00:34:40.191753 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:40Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:40 crc kubenswrapper[4791]: I0218 00:34:40.205201 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:40Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:40 crc kubenswrapper[4791]: I0218 00:34:40.218151 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:40Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:40 crc kubenswrapper[4791]: I0218 00:34:40.230003 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:40Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:40 crc kubenswrapper[4791]: I0218 00:34:40.240526 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:40Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:40 crc kubenswrapper[4791]: I0218 00:34:40.253679 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b63c6e04acaa4e43b2ae978cce80ae00e6586479ab4323ae2c81e673b5e535c6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:33Z\\\",\\\"message\\\":\\\"W0218 00:34:22.108325 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0218 
00:34:22.108571 1 crypto.go:601] Generating new CA for check-endpoints-signer@1771374862 cert, and key in /tmp/serving-cert-623947780/serving-signer.crt, /tmp/serving-cert-623947780/serving-signer.key\\\\nI0218 00:34:22.523502 1 observer_polling.go:159] Starting file observer\\\\nW0218 00:34:22.525400 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0218 00:34:22.525522 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:22.527417 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-623947780/tls.crt::/tmp/serving-cert-623947780/tls.key\\\\\\\"\\\\nF0218 00:34:33.085419 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 
00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:40Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:40 crc kubenswrapper[4791]: I0218 00:34:40.267917 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:40Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:40 crc kubenswrapper[4791]: I0218 00:34:40.284046 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:40Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:40 crc kubenswrapper[4791]: I0218 00:34:40.297936 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:40Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:40 crc kubenswrapper[4791]: I0218 00:34:40.313469 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:40Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:40 crc kubenswrapper[4791]: I0218 00:34:40.332339 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b63c6e04acaa4e43b2ae978cce80ae00e6586479ab4323ae2c81e673b5e535c6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:33Z\\\",\\\"message\\\":\\\"W0218 00:34:22.108325 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0218 
00:34:22.108571 1 crypto.go:601] Generating new CA for check-endpoints-signer@1771374862 cert, and key in /tmp/serving-cert-623947780/serving-signer.crt, /tmp/serving-cert-623947780/serving-signer.key\\\\nI0218 00:34:22.523502 1 observer_polling.go:159] Starting file observer\\\\nW0218 00:34:22.525400 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0218 00:34:22.525522 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:22.527417 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-623947780/tls.crt::/tmp/serving-cert-623947780/tls.key\\\\\\\"\\\\nF0218 00:34:33.085419 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 
00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:40Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:40 crc kubenswrapper[4791]: I0218 00:34:40.345224 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:40Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:40 crc kubenswrapper[4791]: I0218 00:34:40.357215 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:40Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:40 crc kubenswrapper[4791]: I0218 00:34:40.371610 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:40Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:40 crc kubenswrapper[4791]: I0218 00:34:40.673458 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:34:40 crc kubenswrapper[4791]: I0218 00:34:40.673610 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:34:40 crc kubenswrapper[4791]: E0218 00:34:40.673709 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:34:42.673673284 +0000 UTC m=+24.241686484 (durationBeforeRetry 2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:34:40 crc kubenswrapper[4791]: E0218 00:34:40.673789 4791 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 18 00:34:40 crc kubenswrapper[4791]: I0218 00:34:40.673797 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:34:40 crc kubenswrapper[4791]: E0218 00:34:40.673817 4791 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 18 00:34:40 crc kubenswrapper[4791]: E0218 00:34:40.673836 4791 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 18 00:34:40 crc kubenswrapper[4791]: I0218 00:34:40.673859 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:34:40 crc kubenswrapper[4791]: E0218 00:34:40.673908 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-02-18 00:34:42.67388542 +0000 UTC m=+24.241898620 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 18 00:34:40 crc kubenswrapper[4791]: I0218 00:34:40.673941 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:34:40 crc kubenswrapper[4791]: E0218 00:34:40.673948 4791 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Feb 18 00:34:40 crc kubenswrapper[4791]: E0218 00:34:40.674013 4791 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 18 00:34:40 crc kubenswrapper[4791]: E0218 00:34:40.674055 4791 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 18 00:34:40 crc kubenswrapper[4791]: E0218 00:34:40.674076 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-18 00:34:42.674059125 +0000 UTC m=+24.242072335 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Feb 18 00:34:40 crc kubenswrapper[4791]: E0218 00:34:40.674076 4791 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 18 00:34:40 crc kubenswrapper[4791]: E0218 00:34:40.674107 4791 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 18 00:34:40 crc kubenswrapper[4791]: E0218 00:34:40.674147 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-02-18 00:34:42.674126907 +0000 UTC m=+24.242140117 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 18 00:34:40 crc kubenswrapper[4791]: E0218 00:34:40.674207 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-18 00:34:42.674187319 +0000 UTC m=+24.242200489 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 18 00:34:40 crc kubenswrapper[4791]: I0218 00:34:40.876025 4791 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.021201 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-14 07:03:23.262514432 +0000 UTC Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.061080 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.061123 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.061080 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:34:41 crc kubenswrapper[4791]: E0218 00:34:41.061296 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:34:41 crc kubenswrapper[4791]: E0218 00:34:41.061371 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:34:41 crc kubenswrapper[4791]: E0218 00:34:41.061516 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.065496 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.072024 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.073301 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.075004 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.075946 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.077677 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.078794 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.079659 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.081127 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.081883 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.083112 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.084092 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.085341 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.086088 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.087406 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.088116 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.089040 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.090271 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.091444 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.092454 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.093918 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.094909 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.098542 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.099564 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.100138 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.101765 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.103391 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.104040 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" 
path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.104848 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.105921 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.106374 4791 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.106473 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.108605 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.109133 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.109568 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.111007 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.111971 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.112492 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.113465 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.114077 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.114925 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.115513 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" 
path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.116532 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.117543 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.118001 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.119260 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.119776 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.120879 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.121336 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.121761 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.122593 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.123102 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.123823 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.124363 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.183074 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.185831 4791 scope.go:117] "RemoveContainer" containerID="05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8" Feb 18 00:34:41 crc kubenswrapper[4791]: E0218 
00:34:41.185966 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.197977 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.211642 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.225815 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.240933 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.254249 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.267186 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.284344 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.585979 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.600676 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.601919 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.605301 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.616494 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.635719 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.652148 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.669323 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.683300 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.700532 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.712678 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.727817 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.755579 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"vol
umeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973add4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\
\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.774069 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.790085 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.806926 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.822336 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:41 crc kubenswrapper[4791]: I0218 00:34:41.835115 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:42 crc kubenswrapper[4791]: I0218 00:34:42.022071 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-02 21:22:06.353366364 +0000 UTC Feb 18 00:34:42 crc kubenswrapper[4791]: I0218 00:34:42.190337 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787"} Feb 18 00:34:42 crc kubenswrapper[4791]: I0218 00:34:42.191409 4791 scope.go:117] "RemoveContainer" containerID="05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8" Feb 18 00:34:42 crc kubenswrapper[4791]: E0218 00:34:42.191578 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Feb 18 00:34:42 crc kubenswrapper[4791]: E0218 00:34:42.204609 4791 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"etcd-crc\" already exists" pod="openshift-etcd/etcd-crc" Feb 18 00:34:42 crc kubenswrapper[4791]: I0218 00:34:42.207538 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:42Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:42 crc kubenswrapper[4791]: I0218 00:34:42.222102 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:42Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:42 crc kubenswrapper[4791]: I0218 00:34:42.243350 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:42Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:42 crc kubenswrapper[4791]: I0218 00:34:42.257046 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:42Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:42 crc kubenswrapper[4791]: I0218 00:34:42.276113 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:42Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:42 crc kubenswrapper[4791]: I0218 00:34:42.300404 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:42Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:42 crc kubenswrapper[4791]: I0218 00:34:42.322302 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"vol
umeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973add4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\
\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:42Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:42 crc kubenswrapper[4791]: I0218 00:34:42.336780 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:42Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:42 crc kubenswrapper[4791]: I0218 00:34:42.692280 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:34:42 crc kubenswrapper[4791]: I0218 00:34:42.692394 4791 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:34:42 crc kubenswrapper[4791]: I0218 00:34:42.692422 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:34:42 crc kubenswrapper[4791]: I0218 00:34:42.692444 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:34:42 crc kubenswrapper[4791]: I0218 00:34:42.692472 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:34:42 crc kubenswrapper[4791]: E0218 00:34:42.692555 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:34:46.692523127 +0000 UTC m=+28.260536307 (durationBeforeRetry 4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:34:42 crc kubenswrapper[4791]: E0218 00:34:42.692610 4791 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 18 00:34:42 crc kubenswrapper[4791]: E0218 00:34:42.692629 4791 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 18 00:34:42 crc kubenswrapper[4791]: E0218 00:34:42.692641 4791 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 18 00:34:42 crc kubenswrapper[4791]: E0218 00:34:42.692645 4791 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Feb 18 00:34:42 crc kubenswrapper[4791]: E0218 00:34:42.692690 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-02-18 00:34:46.692675212 +0000 UTC m=+28.260688442 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 18 00:34:42 crc kubenswrapper[4791]: E0218 00:34:42.692715 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-18 00:34:46.692700163 +0000 UTC m=+28.260713433 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Feb 18 00:34:42 crc kubenswrapper[4791]: E0218 00:34:42.692767 4791 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 18 00:34:42 crc kubenswrapper[4791]: E0218 00:34:42.692767 4791 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 18 00:34:42 crc kubenswrapper[4791]: E0218 00:34:42.692808 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-18 00:34:46.692797616 +0000 UTC m=+28.260810886 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 18 00:34:42 crc kubenswrapper[4791]: E0218 00:34:42.692778 4791 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 18 00:34:42 crc kubenswrapper[4791]: E0218 00:34:42.692834 4791 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 18 00:34:42 crc kubenswrapper[4791]: E0218 00:34:42.692866 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-02-18 00:34:46.692858938 +0000 UTC m=+28.260872208 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 18 00:34:43 crc kubenswrapper[4791]: I0218 00:34:43.023267 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-07 22:07:09.833403092 +0000 UTC Feb 18 00:34:43 crc kubenswrapper[4791]: I0218 00:34:43.060752 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:34:43 crc kubenswrapper[4791]: I0218 00:34:43.060817 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:34:43 crc kubenswrapper[4791]: I0218 00:34:43.060762 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:34:43 crc kubenswrapper[4791]: E0218 00:34:43.060934 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:34:43 crc kubenswrapper[4791]: E0218 00:34:43.061031 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:34:43 crc kubenswrapper[4791]: E0218 00:34:43.061122 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:34:44 crc kubenswrapper[4791]: I0218 00:34:44.024360 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 08:45:04.74714009 +0000 UTC Feb 18 00:34:44 crc kubenswrapper[4791]: I0218 00:34:44.382611 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 18 00:34:44 crc kubenswrapper[4791]: I0218 00:34:44.390385 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 18 00:34:44 crc kubenswrapper[4791]: I0218 00:34:44.394170 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Feb 18 00:34:44 crc kubenswrapper[4791]: I0218 00:34:44.404154 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:44Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:44 crc kubenswrapper[4791]: I0218 00:34:44.421025 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:44Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:44 crc kubenswrapper[4791]: I0218 00:34:44.445173 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:44Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:44 crc kubenswrapper[4791]: I0218 00:34:44.475853 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"vol
umeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973add4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\
\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:44Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:44 crc kubenswrapper[4791]: I0218 00:34:44.502936 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:44Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:44 crc kubenswrapper[4791]: I0218 00:34:44.518946 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:44Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:44 crc kubenswrapper[4791]: I0218 00:34:44.535245 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:44Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:44 crc kubenswrapper[4791]: I0218 00:34:44.547706 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:44Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:44 crc kubenswrapper[4791]: I0218 00:34:44.563360 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:44Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:44 crc kubenswrapper[4791]: I0218 00:34:44.579212 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:44Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:44 crc kubenswrapper[4791]: I0218 00:34:44.591725 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:44Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:44 crc kubenswrapper[4791]: I0218 00:34:44.606398 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:44Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:44 crc kubenswrapper[4791]: I0218 00:34:44.622297 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:44Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:44 crc kubenswrapper[4791]: I0218 00:34:44.645959 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9ab
a0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973add4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev
/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:44Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:44 crc kubenswrapper[4791]: I0218 00:34:44.656842 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:44Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:44 crc kubenswrapper[4791]: I0218 00:34:44.667218 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:44Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:44 crc kubenswrapper[4791]: I0218 00:34:44.678057 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:44Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.006170 4791 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.007794 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.007829 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.007839 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.007897 4791 kubelet_node_status.go:76] "Attempting to register node" node="crc" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.013417 4791 kubelet_node_status.go:115] "Node was previously registered" node="crc" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.013633 4791 kubelet_node_status.go:79] "Successfully registered node" node="crc" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.014448 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.014555 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:45 crc 
kubenswrapper[4791]: I0218 00:34:45.014638 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.014722 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.014793 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:45Z","lastTransitionTime":"2026-02-18T00:34:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.024598 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-21 10:24:59.162266621 +0000 UTC Feb 18 00:34:45 crc kubenswrapper[4791]: E0218 00:34:45.033898 4791 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"49ba9a08-9d87-4573-b809-fff0547601af\\\",\\\"systemUUID\\\":\\\"e1be6815-4e1d-4d7f-9918-bb66fef09f86\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:45Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.036972 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.037011 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.037023 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.037037 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.037047 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:45Z","lastTransitionTime":"2026-02-18T00:34:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:45 crc kubenswrapper[4791]: E0218 00:34:45.049476 4791 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"49ba9a08-9d87-4573-b809-fff0547601af\\\",\\\"systemUUID\\\":\\\"e1be6815-4e1d-4d7f-9918-bb66fef09f86\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:45Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.052977 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.053020 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.053028 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.053042 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.053051 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:45Z","lastTransitionTime":"2026-02-18T00:34:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.061052 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.061052 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:34:45 crc kubenswrapper[4791]: E0218 00:34:45.061196 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.061070 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:34:45 crc kubenswrapper[4791]: E0218 00:34:45.061260 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:34:45 crc kubenswrapper[4791]: E0218 00:34:45.061349 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:34:45 crc kubenswrapper[4791]: E0218 00:34:45.067297 4791 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"49ba9a08-9d87-4573-b809-fff0547601af\\\",\\\"systemUUID\\\":\\\"e1be6815-4e1d-4d7f-9918-bb66fef09f86\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:45Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.069771 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.069800 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.069807 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.069823 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.069834 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:45Z","lastTransitionTime":"2026-02-18T00:34:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:45 crc kubenswrapper[4791]: E0218 00:34:45.083362 4791 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"49ba9a08-9d87-4573-b809-fff0547601af\\\",\\\"systemUUID\\\":\\\"e1be6815-4e1d-4d7f-9918-bb66fef09f86\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:45Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.086514 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.086547 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.086556 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.086570 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.086583 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:45Z","lastTransitionTime":"2026-02-18T00:34:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:45 crc kubenswrapper[4791]: E0218 00:34:45.098880 4791 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"49ba9a08-9d87-4573-b809-fff0547601af\\\",\\\"systemUUID\\\":\\\"e1be6815-4e1d-4d7f-9918-bb66fef09f86\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:45Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:45 crc kubenswrapper[4791]: E0218 00:34:45.099044 4791 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.100691 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.100734 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.100743 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.100757 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.100770 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:45Z","lastTransitionTime":"2026-02-18T00:34:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.202878 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.202912 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.202923 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.202937 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.202947 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:45Z","lastTransitionTime":"2026-02-18T00:34:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.305460 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.305496 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.305505 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.305519 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.305530 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:45Z","lastTransitionTime":"2026-02-18T00:34:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.358086 4791 csr.go:261] certificate signing request csr-w8g2r is approved, waiting to be issued Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.396071 4791 csr.go:257] certificate signing request csr-w8g2r is issued Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.407884 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.407927 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.407939 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.407955 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.407968 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:45Z","lastTransitionTime":"2026-02-18T00:34:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.510117 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.510170 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.510180 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.510200 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.510209 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:45Z","lastTransitionTime":"2026-02-18T00:34:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.612676 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.612718 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.612747 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.612761 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.612772 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:45Z","lastTransitionTime":"2026-02-18T00:34:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.714666 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.714702 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.714711 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.714725 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.714736 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:45Z","lastTransitionTime":"2026-02-18T00:34:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.816890 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.816933 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.816954 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.816973 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.816983 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:45Z","lastTransitionTime":"2026-02-18T00:34:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.919500 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.919543 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.919557 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.919575 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:45 crc kubenswrapper[4791]: I0218 00:34:45.919587 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:45Z","lastTransitionTime":"2026-02-18T00:34:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.021541 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.021578 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.021587 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.021600 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.021609 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:46Z","lastTransitionTime":"2026-02-18T00:34:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.025700 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-13 00:34:52.038212212 +0000 UTC Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.124280 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.124315 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.124323 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.124337 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.124346 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:46Z","lastTransitionTime":"2026-02-18T00:34:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.226648 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.226683 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.226694 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.226708 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.226717 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:46Z","lastTransitionTime":"2026-02-18T00:34:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.259483 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-bhfmv"] Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.259841 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.262229 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-d2kpn"] Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.262437 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.263946 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.264974 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.265202 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.265255 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.265379 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.265422 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.265504 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.265968 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.266366 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-cg5l2"] Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.266630 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-vnz85"] Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.267381 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-cg5l2" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.267718 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-vnz85" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.269102 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.269943 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.270026 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.270175 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.270194 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-tr5hg"] Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.270362 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.270730 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.270889 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.272844 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.273177 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.273338 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.273499 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.273981 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.274041 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.274142 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.274392 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.285821 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:46Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.299556 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:46Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.310440 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:46Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.322799 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/bb393ddf-cece-42f2-8d94-c88a3d536802-os-release\") pod \"multus-additional-cni-plugins-vnz85\" (UID: \"bb393ddf-cece-42f2-8d94-c88a3d536802\") " pod="openshift-multus/multus-additional-cni-plugins-vnz85" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.322835 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-etc-openvswitch\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.322862 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.322887 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-multus-cni-dir\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.322917 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-host-var-lib-kubelet\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.322950 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-host-run-netns\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.322970 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-etc-kubernetes\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.322991 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6hg6c\" (UniqueName: \"kubernetes.io/projected/83bdb769-59eb-4472-ba08-be5897ee2cd6-kube-api-access-6hg6c\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.323012 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-run-netns\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.323036 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-multus-socket-dir-parent\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.323057 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-host-run-multus-certs\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.323075 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-multus-conf-dir\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.323100 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-os-release\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.323118 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/bb393ddf-cece-42f2-8d94-c88a3d536802-cni-binary-copy\") pod \"multus-additional-cni-plugins-vnz85\" (UID: \"bb393ddf-cece-42f2-8d94-c88a3d536802\") " pod="openshift-multus/multus-additional-cni-plugins-vnz85" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.323149 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/bb393ddf-cece-42f2-8d94-c88a3d536802-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-vnz85\" (UID: \"bb393ddf-cece-42f2-8d94-c88a3d536802\") " pod="openshift-multus/multus-additional-cni-plugins-vnz85" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.323204 4791 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-var-lib-openvswitch\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.323219 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-run-ovn\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.323233 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b31a333-8f95-459c-8135-e91e557c4c85-mcd-auth-proxy-config\") pod \"machine-config-daemon-bhfmv\" (UID: \"0b31a333-8f95-459c-8135-e91e557c4c85\") " pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.323257 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-system-cni-dir\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.323279 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-ovnkube-config\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.323335 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-kubelet\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.323367 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-run-openvswitch\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.323388 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/bb393ddf-cece-42f2-8d94-c88a3d536802-system-cni-dir\") pod \"multus-additional-cni-plugins-vnz85\" (UID: \"bb393ddf-cece-42f2-8d94-c88a3d536802\") " pod="openshift-multus/multus-additional-cni-plugins-vnz85" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.323408 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1-hosts-file\") pod \"node-resolver-cg5l2\" (UID: \"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\") " 
pod="openshift-dns/node-resolver-cg5l2" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.323476 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mpnzj\" (UniqueName: \"kubernetes.io/projected/cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1-kube-api-access-mpnzj\") pod \"node-resolver-cg5l2\" (UID: \"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\") " pod="openshift-dns/node-resolver-cg5l2" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.323525 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-run-systemd\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.323560 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-log-socket\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.323580 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-ovnkube-script-lib\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.323638 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xql4c\" (UniqueName: \"kubernetes.io/projected/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-kube-api-access-xql4c\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.323700 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-slash\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.323723 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-cni-bin\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.323756 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-env-overrides\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.323774 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/0b31a333-8f95-459c-8135-e91e557c4c85-rootfs\") pod 
\"machine-config-daemon-bhfmv\" (UID: \"0b31a333-8f95-459c-8135-e91e557c4c85\") " pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.323789 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b31a333-8f95-459c-8135-e91e557c4c85-proxy-tls\") pod \"machine-config-daemon-bhfmv\" (UID: \"0b31a333-8f95-459c-8135-e91e557c4c85\") " pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.323805 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-host-run-k8s-cni-cncf-io\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.323821 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zwvrc\" (UniqueName: \"kubernetes.io/projected/bb393ddf-cece-42f2-8d94-c88a3d536802-kube-api-access-zwvrc\") pod \"multus-additional-cni-plugins-vnz85\" (UID: \"bb393ddf-cece-42f2-8d94-c88a3d536802\") " pod="openshift-multus/multus-additional-cni-plugins-vnz85" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.323837 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/83bdb769-59eb-4472-ba08-be5897ee2cd6-cni-binary-copy\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.323852 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-systemd-units\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.323874 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-node-log\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.323957 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-hostroot\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.323979 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/bb393ddf-cece-42f2-8d94-c88a3d536802-cnibin\") pod \"multus-additional-cni-plugins-vnz85\" (UID: \"bb393ddf-cece-42f2-8d94-c88a3d536802\") " pod="openshift-multus/multus-additional-cni-plugins-vnz85" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.323996 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/83bdb769-59eb-4472-ba08-be5897ee2cd6-multus-daemon-config\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.324018 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-run-ovn-kubernetes\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.324033 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-cni-netd\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.324057 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-cnibin\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.324077 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-ovn-node-metrics-cert\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.324093 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cxtwt\" (UniqueName: \"kubernetes.io/projected/0b31a333-8f95-459c-8135-e91e557c4c85-kube-api-access-cxtwt\") pod \"machine-config-daemon-bhfmv\" (UID: \"0b31a333-8f95-459c-8135-e91e557c4c85\") " pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.324113 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-host-var-lib-cni-multus\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.324134 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/bb393ddf-cece-42f2-8d94-c88a3d536802-tuning-conf-dir\") pod \"multus-additional-cni-plugins-vnz85\" (UID: \"bb393ddf-cece-42f2-8d94-c88a3d536802\") " pod="openshift-multus/multus-additional-cni-plugins-vnz85" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.324149 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-host-var-lib-cni-bin\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 
00:34:46.328238 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/k
ubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973add4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\
\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:46Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.328930 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.328963 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.328973 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.328994 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.329003 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:46Z","lastTransitionTime":"2026-02-18T00:34:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.340393 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:46Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.350351 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:46Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.362346 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:46Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.377707 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b31a333-8f95-459c-8135-e91e557c4c85\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bhfmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:46Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.395447 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:46Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.397532 4791 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2027-02-18 00:29:45 +0000 UTC, rotation deadline is 2026-12-29 16:39:57.959697636 +0000 UTC Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.397582 4791 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 7552h5m11.56211837s for next certificate rotation Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.414479 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:46Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.425490 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-cni-netd\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.425538 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/83bdb769-59eb-4472-ba08-be5897ee2cd6-multus-daemon-config\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.425555 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-run-ovn-kubernetes\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.425592 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-cnibin\") pod \"multus-d2kpn\" (UID: 
\"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.425610 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-ovn-node-metrics-cert\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.425626 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cxtwt\" (UniqueName: \"kubernetes.io/projected/0b31a333-8f95-459c-8135-e91e557c4c85-kube-api-access-cxtwt\") pod \"machine-config-daemon-bhfmv\" (UID: \"0b31a333-8f95-459c-8135-e91e557c4c85\") " pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.425643 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-host-var-lib-cni-bin\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.425649 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-cni-netd\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.425741 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-host-var-lib-cni-bin\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.425751 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-cnibin\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.425827 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-run-ovn-kubernetes\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.425875 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-host-var-lib-cni-multus\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.426312 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/83bdb769-59eb-4472-ba08-be5897ee2cd6-multus-daemon-config\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 
00:34:46.425659 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-host-var-lib-cni-multus\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.426953 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/bb393ddf-cece-42f2-8d94-c88a3d536802-tuning-conf-dir\") pod \"multus-additional-cni-plugins-vnz85\" (UID: \"bb393ddf-cece-42f2-8d94-c88a3d536802\") " pod="openshift-multus/multus-additional-cni-plugins-vnz85" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.426973 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-multus-cni-dir\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427070 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-multus-cni-dir\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.426987 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-host-var-lib-kubelet\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427104 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-host-var-lib-kubelet\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427120 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/bb393ddf-cece-42f2-8d94-c88a3d536802-os-release\") pod \"multus-additional-cni-plugins-vnz85\" (UID: \"bb393ddf-cece-42f2-8d94-c88a3d536802\") " pod="openshift-multus/multus-additional-cni-plugins-vnz85" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427146 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-etc-openvswitch\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427175 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427192 4791 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-6hg6c\" (UniqueName: \"kubernetes.io/projected/83bdb769-59eb-4472-ba08-be5897ee2cd6-kube-api-access-6hg6c\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427211 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-host-run-netns\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427225 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-etc-kubernetes\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427238 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/bb393ddf-cece-42f2-8d94-c88a3d536802-os-release\") pod \"multus-additional-cni-plugins-vnz85\" (UID: \"bb393ddf-cece-42f2-8d94-c88a3d536802\") " pod="openshift-multus/multus-additional-cni-plugins-vnz85" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427247 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-run-netns\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427263 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-multus-socket-dir-parent\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427279 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-host-run-multus-certs\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427295 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-os-release\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427308 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-multus-conf-dir\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427333 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-system-cni-dir\") pod \"multus-d2kpn\" 
(UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427347 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/bb393ddf-cece-42f2-8d94-c88a3d536802-cni-binary-copy\") pod \"multus-additional-cni-plugins-vnz85\" (UID: \"bb393ddf-cece-42f2-8d94-c88a3d536802\") " pod="openshift-multus/multus-additional-cni-plugins-vnz85" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427365 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/bb393ddf-cece-42f2-8d94-c88a3d536802-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-vnz85\" (UID: \"bb393ddf-cece-42f2-8d94-c88a3d536802\") " pod="openshift-multus/multus-additional-cni-plugins-vnz85" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427380 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-var-lib-openvswitch\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427387 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-etc-openvswitch\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427396 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-run-ovn\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427412 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b31a333-8f95-459c-8135-e91e557c4c85-mcd-auth-proxy-config\") pod \"machine-config-daemon-bhfmv\" (UID: \"0b31a333-8f95-459c-8135-e91e557c4c85\") " pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427414 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427436 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-kubelet\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427453 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-kubelet\") pod 
\"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427468 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-run-openvswitch\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427476 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-host-run-netns\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427486 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-ovnkube-config\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427524 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-os-release\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427551 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-run-systemd\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427580 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-multus-conf-dir\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427598 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/bb393ddf-cece-42f2-8d94-c88a3d536802-system-cni-dir\") pod \"multus-additional-cni-plugins-vnz85\" (UID: \"bb393ddf-cece-42f2-8d94-c88a3d536802\") " pod="openshift-multus/multus-additional-cni-plugins-vnz85" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427628 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1-hosts-file\") pod \"node-resolver-cg5l2\" (UID: \"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\") " pod="openshift-dns/node-resolver-cg5l2" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427658 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mpnzj\" (UniqueName: \"kubernetes.io/projected/cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1-kube-api-access-mpnzj\") pod \"node-resolver-cg5l2\" (UID: \"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\") " pod="openshift-dns/node-resolver-cg5l2" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 
00:34:46.427685 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-log-socket\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427701 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-system-cni-dir\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427714 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-ovnkube-script-lib\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427747 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-slash\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427754 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-var-lib-openvswitch\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427777 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-cni-bin\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427808 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xql4c\" (UniqueName: \"kubernetes.io/projected/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-kube-api-access-xql4c\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427835 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-env-overrides\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427863 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-host-run-k8s-cni-cncf-io\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427891 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zwvrc\" 
(UniqueName: \"kubernetes.io/projected/bb393ddf-cece-42f2-8d94-c88a3d536802-kube-api-access-zwvrc\") pod \"multus-additional-cni-plugins-vnz85\" (UID: \"bb393ddf-cece-42f2-8d94-c88a3d536802\") " pod="openshift-multus/multus-additional-cni-plugins-vnz85" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427921 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/0b31a333-8f95-459c-8135-e91e557c4c85-rootfs\") pod \"machine-config-daemon-bhfmv\" (UID: \"0b31a333-8f95-459c-8135-e91e557c4c85\") " pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427947 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b31a333-8f95-459c-8135-e91e557c4c85-proxy-tls\") pod \"machine-config-daemon-bhfmv\" (UID: \"0b31a333-8f95-459c-8135-e91e557c4c85\") " pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427995 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-run-ovn\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.428010 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/83bdb769-59eb-4472-ba08-be5897ee2cd6-cni-binary-copy\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.428037 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-run-openvswitch\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.428044 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-systemd-units\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.428067 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-run-netns\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.428074 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-node-log\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.428122 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-hostroot\") pod \"multus-d2kpn\" (UID: 
\"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.428178 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/bb393ddf-cece-42f2-8d94-c88a3d536802-cnibin\") pod \"multus-additional-cni-plugins-vnz85\" (UID: \"bb393ddf-cece-42f2-8d94-c88a3d536802\") " pod="openshift-multus/multus-additional-cni-plugins-vnz85" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.428216 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-run-systemd\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.428241 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/bb393ddf-cece-42f2-8d94-c88a3d536802-system-cni-dir\") pod \"multus-additional-cni-plugins-vnz85\" (UID: \"bb393ddf-cece-42f2-8d94-c88a3d536802\") " pod="openshift-multus/multus-additional-cni-plugins-vnz85" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.428261 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/bb393ddf-cece-42f2-8d94-c88a3d536802-cnibin\") pod \"multus-additional-cni-plugins-vnz85\" (UID: \"bb393ddf-cece-42f2-8d94-c88a3d536802\") " pod="openshift-multus/multus-additional-cni-plugins-vnz85" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.428290 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1-hosts-file\") pod \"node-resolver-cg5l2\" (UID: \"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\") " pod="openshift-dns/node-resolver-cg5l2" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.428405 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-log-socket\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.428479 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b31a333-8f95-459c-8135-e91e557c4c85-mcd-auth-proxy-config\") pod \"machine-config-daemon-bhfmv\" (UID: \"0b31a333-8f95-459c-8135-e91e557c4c85\") " pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.428514 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-slash\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427432 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-etc-kubernetes\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 
00:34:46.428545 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-cni-bin\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.428575 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-multus-socket-dir-parent\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.427948 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/bb393ddf-cece-42f2-8d94-c88a3d536802-cni-binary-copy\") pod \"multus-additional-cni-plugins-vnz85\" (UID: \"bb393ddf-cece-42f2-8d94-c88a3d536802\") " pod="openshift-multus/multus-additional-cni-plugins-vnz85" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.428605 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-host-run-k8s-cni-cncf-io\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.428679 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/bb393ddf-cece-42f2-8d94-c88a3d536802-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-vnz85\" (UID: \"bb393ddf-cece-42f2-8d94-c88a3d536802\") " pod="openshift-multus/multus-additional-cni-plugins-vnz85" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.428759 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-systemd-units\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.428824 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/0b31a333-8f95-459c-8135-e91e557c4c85-rootfs\") pod \"machine-config-daemon-bhfmv\" (UID: \"0b31a333-8f95-459c-8135-e91e557c4c85\") " pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.428854 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-env-overrides\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.428865 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-ovnkube-script-lib\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.428014 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-ovnkube-config\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.428913 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-hostroot\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.428916 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-node-log\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.428980 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/bb393ddf-cece-42f2-8d94-c88a3d536802-tuning-conf-dir\") pod \"multus-additional-cni-plugins-vnz85\" (UID: \"bb393ddf-cece-42f2-8d94-c88a3d536802\") " pod="openshift-multus/multus-additional-cni-plugins-vnz85" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.429427 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/83bdb769-59eb-4472-ba08-be5897ee2cd6-cni-binary-copy\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.429525 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/83bdb769-59eb-4472-ba08-be5897ee2cd6-host-run-multus-certs\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.433927 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.433961 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.433972 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.433989 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.434000 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:46Z","lastTransitionTime":"2026-02-18T00:34:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.434872 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b31a333-8f95-459c-8135-e91e557c4c85-proxy-tls\") pod \"machine-config-daemon-bhfmv\" (UID: \"0b31a333-8f95-459c-8135-e91e557c4c85\") " pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.435234 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-ovn-node-metrics-cert\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.457776 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xql4c\" (UniqueName: \"kubernetes.io/projected/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-kube-api-access-xql4c\") pod \"ovnkube-node-tr5hg\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.458097 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb393ddf-cece-42f2-8d94-c88a3d536802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vnz85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:46Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.461642 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zwvrc\" (UniqueName: \"kubernetes.io/projected/bb393ddf-cece-42f2-8d94-c88a3d536802-kube-api-access-zwvrc\") pod \"multus-additional-cni-plugins-vnz85\" (UID: \"bb393ddf-cece-42f2-8d94-c88a3d536802\") " pod="openshift-multus/multus-additional-cni-plugins-vnz85" Feb 18 00:34:46 crc 
kubenswrapper[4791]: I0218 00:34:46.465720 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cxtwt\" (UniqueName: \"kubernetes.io/projected/0b31a333-8f95-459c-8135-e91e557c4c85-kube-api-access-cxtwt\") pod \"machine-config-daemon-bhfmv\" (UID: \"0b31a333-8f95-459c-8135-e91e557c4c85\") " pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.469876 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mpnzj\" (UniqueName: \"kubernetes.io/projected/cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1-kube-api-access-mpnzj\") pod \"node-resolver-cg5l2\" (UID: \"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\") " pod="openshift-dns/node-resolver-cg5l2" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.474630 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6hg6c\" (UniqueName: \"kubernetes.io/projected/83bdb769-59eb-4472-ba08-be5897ee2cd6-kube-api-access-6hg6c\") pod \"multus-d2kpn\" (UID: \"83bdb769-59eb-4472-ba08-be5897ee2cd6\") " pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.520275 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tr5hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:46Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.535992 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.536267 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 
00:34:46.536342 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.536416 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.536071 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-d2kpn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bdb769-59eb-4472-ba08-be5897ee2cd6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\
\\":\\\"kube-api-access-6hg6c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-d2kpn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:46Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.536670 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:46Z","lastTransitionTime":"2026-02-18T00:34:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.548247 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-cg5l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpnzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-cg5l2\": Internal error occurred: 
failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:46Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.565516 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8e
e7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973add4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"et
cd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:46Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.570131 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.577610 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-d2kpn" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.579567 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:46Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.582865 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-cg5l2" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.589355 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-vnz85" Feb 18 00:34:46 crc kubenswrapper[4791]: W0218 00:34:46.592295 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod83bdb769_59eb_4472_ba08_be5897ee2cd6.slice/crio-efbc9f88e1c1de0972e887f8a6fc1e9e7e023b84f02703e3e26dfbe9df692c7f WatchSource:0}: Error finding container efbc9f88e1c1de0972e887f8a6fc1e9e7e023b84f02703e3e26dfbe9df692c7f: Status 404 returned error can't find the container with id efbc9f88e1c1de0972e887f8a6fc1e9e7e023b84f02703e3e26dfbe9df692c7f Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.593615 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:46Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:46 crc kubenswrapper[4791]: W0218 00:34:46.593793 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcee2e3b0_8c1a_4fe7_86ba_1e54cbdd33e1.slice/crio-3bdb38fe593b9c51f7469fc5ee5b901180d887e8e326f4185f79e071901c6e23 WatchSource:0}: Error finding container 3bdb38fe593b9c51f7469fc5ee5b901180d887e8e326f4185f79e071901c6e23: Status 404 returned error can't find the container with id 3bdb38fe593b9c51f7469fc5ee5b901180d887e8e326f4185f79e071901c6e23 Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.598594 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.605822 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:46Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:46 crc kubenswrapper[4791]: W0218 00:34:46.607122 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbb393ddf_cece_42f2_8d94_c88a3d536802.slice/crio-b2f2cba31cac4ec84edb36ffdb19873002db2f7228c95abaef17a8e83576d42e WatchSource:0}: Error finding container b2f2cba31cac4ec84edb36ffdb19873002db2f7228c95abaef17a8e83576d42e: Status 404 returned error can't find the container with id 
b2f2cba31cac4ec84edb36ffdb19873002db2f7228c95abaef17a8e83576d42e Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.617019 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:46Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.630752 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:46Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.641019 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.641057 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.641067 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.641086 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.641096 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:46Z","lastTransitionTime":"2026-02-18T00:34:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.644931 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:46Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.663042 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b31a333-8f95-459c-8135-e91e557c4c85\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bhfmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:46Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.675827 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:46Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.692774 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:46Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.731262 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:34:46 crc kubenswrapper[4791]: E0218 00:34:46.731397 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:34:54.731360992 +0000 UTC m=+36.299374162 (durationBeforeRetry 8s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.731751 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.731888 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.731931 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.731949 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:34:46 crc kubenswrapper[4791]: E0218 00:34:46.731833 4791 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Feb 18 00:34:46 crc kubenswrapper[4791]: E0218 00:34:46.732069 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-18 00:34:54.732061792 +0000 UTC m=+36.300074962 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Feb 18 00:34:46 crc kubenswrapper[4791]: E0218 00:34:46.732296 4791 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 18 00:34:46 crc kubenswrapper[4791]: E0218 00:34:46.732340 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-18 00:34:54.73233031 +0000 UTC m=+36.300343480 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 18 00:34:46 crc kubenswrapper[4791]: E0218 00:34:46.732032 4791 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 18 00:34:46 crc kubenswrapper[4791]: E0218 00:34:46.732369 4791 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 18 00:34:46 crc kubenswrapper[4791]: E0218 00:34:46.732378 4791 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 18 00:34:46 crc kubenswrapper[4791]: E0218 00:34:46.732401 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-02-18 00:34:54.732395332 +0000 UTC m=+36.300408502 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 18 00:34:46 crc kubenswrapper[4791]: E0218 00:34:46.732411 4791 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 18 00:34:46 crc kubenswrapper[4791]: E0218 00:34:46.732439 4791 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 18 00:34:46 crc kubenswrapper[4791]: E0218 00:34:46.732451 4791 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 18 00:34:46 crc kubenswrapper[4791]: E0218 00:34:46.732506 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-02-18 00:34:54.732490245 +0000 UTC m=+36.300503415 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.743677 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.743731 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.743744 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.743759 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.743789 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:46Z","lastTransitionTime":"2026-02-18T00:34:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.846232 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.846277 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.846289 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.846308 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.846321 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:46Z","lastTransitionTime":"2026-02-18T00:34:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.948348 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.948382 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.948390 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.948403 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:46 crc kubenswrapper[4791]: I0218 00:34:46.948413 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:46Z","lastTransitionTime":"2026-02-18T00:34:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.025964 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-11 12:53:40.063547965 +0000 UTC Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.050984 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.051019 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.051030 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.051047 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.051057 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:47Z","lastTransitionTime":"2026-02-18T00:34:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.060926 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:34:47 crc kubenswrapper[4791]: E0218 00:34:47.061013 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.060926 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:34:47 crc kubenswrapper[4791]: E0218 00:34:47.061103 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.061290 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:34:47 crc kubenswrapper[4791]: E0218 00:34:47.061359 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.152735 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.152773 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.152781 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.152793 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.152802 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:47Z","lastTransitionTime":"2026-02-18T00:34:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.204255 4791 generic.go:334] "Generic (PLEG): container finished" podID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerID="4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357" exitCode=0 Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.204337 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" event={"ID":"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1","Type":"ContainerDied","Data":"4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357"} Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.204370 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" event={"ID":"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1","Type":"ContainerStarted","Data":"5457055bb43f94330ff5781a028bde94b56febcde31f08807ade6fe3fee37ea0"} Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.206389 4791 generic.go:334] "Generic (PLEG): container finished" podID="bb393ddf-cece-42f2-8d94-c88a3d536802" containerID="c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df" exitCode=0 Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.206461 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" event={"ID":"bb393ddf-cece-42f2-8d94-c88a3d536802","Type":"ContainerDied","Data":"c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df"} Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.206500 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" event={"ID":"bb393ddf-cece-42f2-8d94-c88a3d536802","Type":"ContainerStarted","Data":"b2f2cba31cac4ec84edb36ffdb19873002db2f7228c95abaef17a8e83576d42e"} Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.207629 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-cg5l2" event={"ID":"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1","Type":"ContainerStarted","Data":"7aa04878bf8d787366f7a951ae7bb9daf6c4bdc56d79b0e487d137fda064c277"} Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.207661 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-dns/node-resolver-cg5l2" event={"ID":"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1","Type":"ContainerStarted","Data":"3bdb38fe593b9c51f7469fc5ee5b901180d887e8e326f4185f79e071901c6e23"} Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.208837 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-d2kpn" event={"ID":"83bdb769-59eb-4472-ba08-be5897ee2cd6","Type":"ContainerStarted","Data":"773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26"} Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.208860 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-d2kpn" event={"ID":"83bdb769-59eb-4472-ba08-be5897ee2cd6","Type":"ContainerStarted","Data":"efbc9f88e1c1de0972e887f8a6fc1e9e7e023b84f02703e3e26dfbe9df692c7f"} Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.210929 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerStarted","Data":"9ebd0f5d677f875f5c5fe2b00c19a8a133c0524373b75be57ae4d9e4159183bd"} Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.211291 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerStarted","Data":"cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107"} Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.211308 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerStarted","Data":"d245fc69d9ddb1a64fb05fbda6b773f5d8be1dd6db6b1f1bbb94772a95cc29ce"} Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.229063 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973ad
d4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.246111 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.255377 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.255402 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.255410 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.255422 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.255432 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:47Z","lastTransitionTime":"2026-02-18T00:34:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.258581 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.272254 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.286239 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-d2kpn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bdb769-59eb-4472-ba08-be5897ee2cd6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hg6c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-d2kpn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.296872 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-cg5l2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpnzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-cg5l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.309609 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.325707 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.340789 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1
ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.354784 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.360731 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.360773 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.360785 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.360802 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.360814 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:47Z","lastTransitionTime":"2026-02-18T00:34:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.370992 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.384140 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b31a333-8f95-459c-8135-e91e557c4c85\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bhfmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.399325 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb393ddf-cece-42f2-8d94-c88a3d536802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plu
gin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vnz85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.418793 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"
ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"c
ri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tr5hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.430741 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.455599 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.462634 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:47 
crc kubenswrapper[4791]: I0218 00:34:47.462676 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.462687 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.462702 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.462712 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:47Z","lastTransitionTime":"2026-02-18T00:34:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.466601 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b31a333-8f95-459c-8135-e91e557c4c85\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ebd0f5d677f875f5c5fe2b00c19a8a133c0524373b75be57ae4d9e4159183bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"na
me\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bhfmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.486570 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.500425 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.511793 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.525360 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb393ddf-cece-42f2-8d94-c88a3d536802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vnz85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc 
kubenswrapper[4791]: I0218 00:34:47.547858 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"Po
dInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\
\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"image\\\":\\\"quay.io/openshift-re
lease-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tr5hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.560443 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-cg5l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aa04878bf8d787366f7a951ae7bb9daf6c4bdc56d79b0e487d137fda064c277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpnzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168
.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-cg5l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.564445 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.564477 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.564488 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.564502 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.564511 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:47Z","lastTransitionTime":"2026-02-18T00:34:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.582041 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973ad
d4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.599718 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.613666 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.627095 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.639030 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-d2kpn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bdb769-59eb-4472-ba08-be5897ee2cd6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hg6c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-d2kpn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.669309 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.669629 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.669645 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.669663 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.669676 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:47Z","lastTransitionTime":"2026-02-18T00:34:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.742624 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-cq6jj"] Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.742967 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-cq6jj" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.744461 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.744635 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.745034 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.745081 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.756443 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb393ddf-cece-42f2-8d94-c88a3d536802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\
\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"po
dIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vnz85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.771729 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.771768 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.771778 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.771795 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.771805 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:47Z","lastTransitionTime":"2026-02-18T00:34:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.774630 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tr5hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z 
is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.786198 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cq6jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4mlx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:47Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cq6jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.813027 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973ad
d4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.831962 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.842558 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/81e5ba5a-400b-42ef-bc1a-f98f45a2e227-host\") pod \"node-ca-cq6jj\" (UID: \"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\") " pod="openshift-image-registry/node-ca-cq6jj" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.842608 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/81e5ba5a-400b-42ef-bc1a-f98f45a2e227-serviceca\") pod \"node-ca-cq6jj\" (UID: \"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\") " pod="openshift-image-registry/node-ca-cq6jj" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.842635 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d4mlx\" (UniqueName: \"kubernetes.io/projected/81e5ba5a-400b-42ef-bc1a-f98f45a2e227-kube-api-access-d4mlx\") pod \"node-ca-cq6jj\" (UID: \"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\") " pod="openshift-image-registry/node-ca-cq6jj" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.845742 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.857234 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.870860 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-d2kpn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bdb769-59eb-4472-ba08-be5897ee2cd6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni
/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hg6c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-d2kpn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.873715 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.873748 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.873760 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.873776 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.873787 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:47Z","lastTransitionTime":"2026-02-18T00:34:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.882104 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-cg5l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aa04878bf8d787366f7a951ae7bb9daf6c4bdc56d79b0e487d137fda064c277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpnzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-cg5l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.895019 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.906001 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.918711 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.932903 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.943788 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/81e5ba5a-400b-42ef-bc1a-f98f45a2e227-host\") pod \"node-ca-cq6jj\" (UID: \"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\") " pod="openshift-image-registry/node-ca-cq6jj" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.943832 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/81e5ba5a-400b-42ef-bc1a-f98f45a2e227-serviceca\") pod \"node-ca-cq6jj\" (UID: \"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\") " pod="openshift-image-registry/node-ca-cq6jj" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.943855 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d4mlx\" (UniqueName: \"kubernetes.io/projected/81e5ba5a-400b-42ef-bc1a-f98f45a2e227-kube-api-access-d4mlx\") pod \"node-ca-cq6jj\" (UID: \"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\") " pod="openshift-image-registry/node-ca-cq6jj" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.944193 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/81e5ba5a-400b-42ef-bc1a-f98f45a2e227-host\") pod \"node-ca-cq6jj\" (UID: \"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\") " pod="openshift-image-registry/node-ca-cq6jj" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.945561 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/81e5ba5a-400b-42ef-bc1a-f98f45a2e227-serviceca\") pod \"node-ca-cq6jj\" (UID: \"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\") " pod="openshift-image-registry/node-ca-cq6jj" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.949106 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.959494 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b31a333-8f95-459c-8135-e91e557c4c85\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ebd0f5d677f875f5c5fe2b00c19a8a133c0524373b75be57ae4d9e4159183bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bhfmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.966562 4791 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-d4mlx\" (UniqueName: \"kubernetes.io/projected/81e5ba5a-400b-42ef-bc1a-f98f45a2e227-kube-api-access-d4mlx\") pod \"node-ca-cq6jj\" (UID: \"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\") " pod="openshift-image-registry/node-ca-cq6jj" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.975713 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.975757 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.975766 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.975781 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:47 crc kubenswrapper[4791]: I0218 00:34:47.975792 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:47Z","lastTransitionTime":"2026-02-18T00:34:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.026478 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 04:07:03.5193739 +0000 UTC Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.078017 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.078050 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.078057 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.078071 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.078080 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:48Z","lastTransitionTime":"2026-02-18T00:34:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.107044 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-cq6jj" Feb 18 00:34:48 crc kubenswrapper[4791]: W0218 00:34:48.119341 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod81e5ba5a_400b_42ef_bc1a_f98f45a2e227.slice/crio-ea19bada2164ae6f1cbceff70da9d5ddd6765ff716b49ba85fa23cd5bb3aa441 WatchSource:0}: Error finding container ea19bada2164ae6f1cbceff70da9d5ddd6765ff716b49ba85fa23cd5bb3aa441: Status 404 returned error can't find the container with id ea19bada2164ae6f1cbceff70da9d5ddd6765ff716b49ba85fa23cd5bb3aa441 Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.181779 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.181817 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.181826 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.181842 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.181851 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:48Z","lastTransitionTime":"2026-02-18T00:34:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.217333 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" event={"ID":"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1","Type":"ContainerStarted","Data":"31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047"} Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.217388 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" event={"ID":"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1","Type":"ContainerStarted","Data":"3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a"} Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.217401 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" event={"ID":"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1","Type":"ContainerStarted","Data":"73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9"} Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.217412 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" event={"ID":"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1","Type":"ContainerStarted","Data":"73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269"} Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.217423 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" event={"ID":"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1","Type":"ContainerStarted","Data":"4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c"} Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.218238 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-image-registry/node-ca-cq6jj" event={"ID":"81e5ba5a-400b-42ef-bc1a-f98f45a2e227","Type":"ContainerStarted","Data":"ea19bada2164ae6f1cbceff70da9d5ddd6765ff716b49ba85fa23cd5bb3aa441"} Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.220361 4791 generic.go:334] "Generic (PLEG): container finished" podID="bb393ddf-cece-42f2-8d94-c88a3d536802" containerID="cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309" exitCode=0 Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.220398 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" event={"ID":"bb393ddf-cece-42f2-8d94-c88a3d536802","Type":"ContainerDied","Data":"cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309"} Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.235792 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-d2kpn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bdb769-59eb-4472-ba08-be5897ee2cd6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPat
h\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hg6c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-d2kpn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:48Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.246958 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-cg5l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aa04878bf8d787366f7a951ae7bb9daf6c4bdc56d79b0e487d137fda064c277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpnzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod 
\"openshift-dns\"/\"node-resolver-cg5l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:48Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.257121 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cq6jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4mlx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:47Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cq6jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:48Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.278486 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973ad
d4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:48Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.284578 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.284605 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.284613 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.284628 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.284639 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:48Z","lastTransitionTime":"2026-02-18T00:34:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.295024 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:48Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.308138 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:48Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.322409 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:48Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.336612 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:48Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.349617 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:48Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.366833 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:48Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.378923 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b31a333-8f95-459c-8135-e91e557c4c85\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ebd0f5d677f875f5c5fe2b00c19a8a133c0524373b75be57ae4d9e4159183bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bhfmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:48Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.397433 4791 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.397479 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.397492 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.397510 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.397522 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:48Z","lastTransitionTime":"2026-02-18T00:34:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.400855 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:48Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.422265 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:48Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.445365 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb393ddf-cece-42f2-8d94-c88a3d536802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-
18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vnz85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:48Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.466740 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tr5hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:48Z 
is after 2025-08-24T17:21:41Z" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.499420 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.499458 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.499469 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.499484 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.499495 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:48Z","lastTransitionTime":"2026-02-18T00:34:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.601429 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.601465 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.601473 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.601488 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.601498 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:48Z","lastTransitionTime":"2026-02-18T00:34:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.703737 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.703771 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.703779 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.703793 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.703801 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:48Z","lastTransitionTime":"2026-02-18T00:34:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.806615 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.806657 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.806684 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.806702 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.806713 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:48Z","lastTransitionTime":"2026-02-18T00:34:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.871690 4791 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.910795 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.910842 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.910851 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.910874 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:48 crc kubenswrapper[4791]: I0218 00:34:48.910884 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:48Z","lastTransitionTime":"2026-02-18T00:34:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.012665 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.012746 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.012768 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.012796 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.012820 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:49Z","lastTransitionTime":"2026-02-18T00:34:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.027206 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-01 21:54:23.60947783 +0000 UTC Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.060866 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.060868 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.060964 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:34:49 crc kubenswrapper[4791]: E0218 00:34:49.061071 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:34:49 crc kubenswrapper[4791]: E0218 00:34:49.061303 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:34:49 crc kubenswrapper[4791]: E0218 00:34:49.061355 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.074171 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.086337 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.103528 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.114703 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.114759 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.114775 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.114797 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.114812 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:49Z","lastTransitionTime":"2026-02-18T00:34:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.116038 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.127603 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.137380 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b31a333-8f95-459c-8135-e91e557c4c85\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ebd0f5d677f875f5c5fe2b00c19a8a133c0524373b75be57ae4d9e4159183bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bhfmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.156532 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb393ddf-cece-42f2-8d94-c88a3d536802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-
18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vnz85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.187281 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tr5hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:49Z 
is after 2025-08-24T17:21:41Z" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.214629 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"
/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973add4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\"
:\\\"2026-02-18T00:34:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.216873 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.216939 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.216950 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.216967 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.216978 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:49Z","lastTransitionTime":"2026-02-18T00:34:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.226680 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" event={"ID":"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1","Type":"ContainerStarted","Data":"097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee"} Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.227934 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-cq6jj" event={"ID":"81e5ba5a-400b-42ef-bc1a-f98f45a2e227","Type":"ContainerStarted","Data":"2a5f72cfc5bc6d0d3ce9a9b929eb8f81b9044509da4761c2b0c0cb0c78aa812c"} Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.230076 4791 generic.go:334] "Generic (PLEG): container finished" podID="bb393ddf-cece-42f2-8d94-c88a3d536802" containerID="df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774" exitCode=0 Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.230112 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" event={"ID":"bb393ddf-cece-42f2-8d94-c88a3d536802","Type":"ContainerDied","Data":"df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774"} Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.230403 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:49Z 
is after 2025-08-24T17:21:41Z" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.244688 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.264193 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.281534 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-d2kpn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bdb769-59eb-4472-ba08-be5897ee2cd6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hg6c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-d2kpn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.296971 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-cg5l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aa04878bf8d787366f7a951ae7bb9daf6c4bdc56d79b0e487d137fda064c277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpnzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-cg5l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.310684 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cq6jj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4mlx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:47Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cq6jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.319611 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.319644 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.319658 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.319674 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.319686 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:49Z","lastTransitionTime":"2026-02-18T00:34:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady 
message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.323364 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.336734 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.351355 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b31a333-8f95-459c-8135-e91e557c4c85\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ebd0f5d677f875f5c5fe2b00c19a8a133c0524373b75be57ae4d9e4159183bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bhfmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.365780 4791 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.377119 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.387330 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.403712 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb393ddf-cece-42f2-8d94-c88a3d536802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vnz85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.421459 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tr5hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:49Z 
is after 2025-08-24T17:21:41Z" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.421815 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.421849 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.421862 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.421878 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.421888 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:49Z","lastTransitionTime":"2026-02-18T00:34:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.431479 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-cg5l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aa04878bf8d787366f7a951ae7bb9daf6c4bdc56d79b0e487d137fda064c277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpnzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-cg5l2\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.440249 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cq6jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a5f72cfc5bc6d0d3ce9a9b929eb8f81b9044509da4761c2b0c0cb0c78aa812c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4mlx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:47Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cq6jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.457766 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973ad
d4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.470570 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.483624 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.494905 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.507213 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-d2kpn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bdb769-59eb-4472-ba08-be5897ee2cd6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hg6c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-d2kpn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.524769 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.524811 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.524819 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.524834 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.524848 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:49Z","lastTransitionTime":"2026-02-18T00:34:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.626932 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.627172 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.627284 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.627405 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.627504 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:49Z","lastTransitionTime":"2026-02-18T00:34:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.730291 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.730630 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.730760 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.730914 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.731035 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:49Z","lastTransitionTime":"2026-02-18T00:34:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.833850 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.834500 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.834640 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.834779 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.834905 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:49Z","lastTransitionTime":"2026-02-18T00:34:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.938429 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.938500 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.938540 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.938576 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:49 crc kubenswrapper[4791]: I0218 00:34:49.938599 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:49Z","lastTransitionTime":"2026-02-18T00:34:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.027875 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-07 12:04:45.763039436 +0000 UTC Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.041124 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.041224 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.041247 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.041279 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.041302 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:50Z","lastTransitionTime":"2026-02-18T00:34:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.143555 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.143624 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.143641 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.143667 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.143684 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:50Z","lastTransitionTime":"2026-02-18T00:34:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.237197 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" event={"ID":"bb393ddf-cece-42f2-8d94-c88a3d536802","Type":"ContainerDied","Data":"b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41"} Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.237197 4791 generic.go:334] "Generic (PLEG): container finished" podID="bb393ddf-cece-42f2-8d94-c88a3d536802" containerID="b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41" exitCode=0 Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.245807 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.245862 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.245879 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.245905 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.245924 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:50Z","lastTransitionTime":"2026-02-18T00:34:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.252995 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-cg5l2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aa04878bf8d787366f7a951ae7bb9daf6c4bdc56d79b0e487d137fda064c277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpnzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-cg5l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:50Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.272346 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cq6jj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a5f72cfc5bc6d0d3ce9a9b929eb8f81b9044509da4761c2b0c0cb0c78aa812c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4mlx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:47Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cq6jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:50Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.308724 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973ad
d4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:50Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.329021 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:50Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.348408 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.348475 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.348492 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.348517 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.348534 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:50Z","lastTransitionTime":"2026-02-18T00:34:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.354227 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:50Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.372872 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:50Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.386226 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-d2kpn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bdb769-59eb-4472-ba08-be5897ee2cd6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni
/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hg6c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-d2kpn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:50Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.401441 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:50Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.415139 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:50Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.426827 4791 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b31a333-8f95-459c-8135-e91e557c4c85\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ebd0f5d677f875f5c5fe2b00c19a8a133c0524373b75be57ae4d9e4159183bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bhfmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:50Z is after 2025-08-24T17:21:41Z" Feb 18 
00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.438345 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\
":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:50Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.450780 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:50Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.451012 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.451057 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.451067 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.451084 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.451093 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:50Z","lastTransitionTime":"2026-02-18T00:34:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.461936 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:50Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.473917 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb393ddf-cece-42f2-8d94-c88a3d536802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"w
aiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vnz85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:50Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.490033 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tr5hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:50Z 
is after 2025-08-24T17:21:41Z" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.553302 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.553350 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.553365 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.553384 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.553398 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:50Z","lastTransitionTime":"2026-02-18T00:34:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.655456 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.655485 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.655492 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.655505 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.655513 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:50Z","lastTransitionTime":"2026-02-18T00:34:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.758303 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.758350 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.758359 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.758376 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.758390 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:50Z","lastTransitionTime":"2026-02-18T00:34:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.861047 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.861085 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.861098 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.861114 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.861126 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:50Z","lastTransitionTime":"2026-02-18T00:34:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.963210 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.963248 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.963259 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.963274 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:50 crc kubenswrapper[4791]: I0218 00:34:50.963285 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:50Z","lastTransitionTime":"2026-02-18T00:34:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.028563 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 20:29:59.864681802 +0000 UTC Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.061298 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.061333 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.061356 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:34:51 crc kubenswrapper[4791]: E0218 00:34:51.061486 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:34:51 crc kubenswrapper[4791]: E0218 00:34:51.061936 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:34:51 crc kubenswrapper[4791]: E0218 00:34:51.062025 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.066330 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.066400 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.066423 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.066451 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.066475 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:51Z","lastTransitionTime":"2026-02-18T00:34:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.186151 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.186202 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.186210 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.186225 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.186236 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:51Z","lastTransitionTime":"2026-02-18T00:34:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.243511 4791 generic.go:334] "Generic (PLEG): container finished" podID="bb393ddf-cece-42f2-8d94-c88a3d536802" containerID="a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f" exitCode=0 Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.243571 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" event={"ID":"bb393ddf-cece-42f2-8d94-c88a3d536802","Type":"ContainerDied","Data":"a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f"} Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.248928 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" event={"ID":"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1","Type":"ContainerStarted","Data":"5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea"} Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.257204 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:51Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.274264 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-d2kpn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bdb769-59eb-4472-ba08-be5897ee2cd6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hg6c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-d2kpn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:51Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.286468 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-cg5l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aa04878bf8d787366f7a951ae7bb9daf6c4bdc56d79b0e487d137fda064c277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpnzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-cg5l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:51Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.288832 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.288877 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.288894 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.288983 4791 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeNotReady" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.289009 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:51Z","lastTransitionTime":"2026-02-18T00:34:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.297983 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cq6jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a5f72cfc5bc6d0d3ce9a9b929eb8f81b9044509da4761c2b0c0cb0c78aa812c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4mlx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:47Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cq6jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:51Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.314913 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973ad
d4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:51Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.330573 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:51Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.346266 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:51Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.358169 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:51Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.369905 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:51Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.382308 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:51Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.391308 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.391350 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.391364 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.391385 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.391401 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:51Z","lastTransitionTime":"2026-02-18T00:34:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.393055 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b31a333-8f95-459c-8135-e91e557c4c85\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ebd0f5d677f875f5c5fe2b00c19a8a133c0524373b75be57ae4d9e4159183bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bhfmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:51Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.408537 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e2
7753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed 
container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:51Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.424004 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:51Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.441889 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb393ddf-cece-42f2-8d94-c88a3d536802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vnz85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:51Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.463677 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tr5hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:51Z 
is after 2025-08-24T17:21:41Z" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.493855 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.493882 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.493891 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.493903 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.493911 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:51Z","lastTransitionTime":"2026-02-18T00:34:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.597520 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.597909 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.598098 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.598355 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.598499 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:51Z","lastTransitionTime":"2026-02-18T00:34:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.700968 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.701291 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.701409 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.701514 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.701604 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:51Z","lastTransitionTime":"2026-02-18T00:34:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.805501 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.805552 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.805566 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.805585 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.805599 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:51Z","lastTransitionTime":"2026-02-18T00:34:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.908293 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.908349 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.908366 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.908389 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:51 crc kubenswrapper[4791]: I0218 00:34:51.908406 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:51Z","lastTransitionTime":"2026-02-18T00:34:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.011614 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.011678 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.011704 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.011735 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.011759 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:52Z","lastTransitionTime":"2026-02-18T00:34:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.028849 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-09 09:50:14.97042546 +0000 UTC Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.114678 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.114750 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.114773 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.114809 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.114834 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:52Z","lastTransitionTime":"2026-02-18T00:34:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.218490 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.218543 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.218565 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.218590 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.218606 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:52Z","lastTransitionTime":"2026-02-18T00:34:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.258131 4791 generic.go:334] "Generic (PLEG): container finished" podID="bb393ddf-cece-42f2-8d94-c88a3d536802" containerID="ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859" exitCode=0 Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.258210 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" event={"ID":"bb393ddf-cece-42f2-8d94-c88a3d536802","Type":"ContainerDied","Data":"ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859"} Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.285902 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:52Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.305854 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:52Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.321044 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:52 
crc kubenswrapper[4791]: I0218 00:34:52.321216 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.321247 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.321317 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.321346 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:52Z","lastTransitionTime":"2026-02-18T00:34:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.324442 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:52Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.340795 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b31a333-8f95-459c-8135-e91e557c4c85\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ebd0f5d677f875f5c5fe2b00c19a8a133c0524373b75be57ae4d9e4159183bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bhfmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:52Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.368100 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:52Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.382954 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:52Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.407185 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb393ddf-cece-42f2-8d94-c88a3d536802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vnz85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:52Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.424640 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.425020 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.425032 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.425049 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.425061 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:52Z","lastTransitionTime":"2026-02-18T00:34:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.442301 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e3
1c9e690dbb5357\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tr5hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:52Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.460476 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-d2kpn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bdb769-59eb-4472-ba08-be5897ee2cd6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/n
et.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hg6c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-d2kpn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:52Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.477727 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-cg5l2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aa04878bf8d787366f7a951ae7bb9daf6c4bdc56d79b0e487d137fda064c277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpnzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-cg5l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:52Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.498206 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cq6jj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a5f72cfc5bc6d0d3ce9a9b929eb8f81b9044509da4761c2b0c0cb0c78aa812c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4mlx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:47Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cq6jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:52Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.528592 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973ad
d4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:52Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.529044 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.529090 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.529276 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.529323 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.529342 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:52Z","lastTransitionTime":"2026-02-18T00:34:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.544044 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:52Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.561461 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:52Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.574738 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:52Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.632047 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.632078 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.632087 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.632101 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.632111 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:52Z","lastTransitionTime":"2026-02-18T00:34:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.734072 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.734133 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.734147 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.734209 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.734222 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:52Z","lastTransitionTime":"2026-02-18T00:34:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.836704 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.836730 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.836741 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.836756 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.836767 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:52Z","lastTransitionTime":"2026-02-18T00:34:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.938778 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.938819 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.938833 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.938849 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:52 crc kubenswrapper[4791]: I0218 00:34:52.938862 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:52Z","lastTransitionTime":"2026-02-18T00:34:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.029880 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-16 14:37:05.796118955 +0000 UTC Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.041668 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.041709 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.041720 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.041737 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.041750 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:53Z","lastTransitionTime":"2026-02-18T00:34:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.060476 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.060499 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.060519 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:34:53 crc kubenswrapper[4791]: E0218 00:34:53.060610 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:34:53 crc kubenswrapper[4791]: E0218 00:34:53.060715 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:34:53 crc kubenswrapper[4791]: E0218 00:34:53.060832 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.143580 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.143635 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.143654 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.143678 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.143696 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:53Z","lastTransitionTime":"2026-02-18T00:34:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.245887 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.245919 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.245928 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.245941 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.245949 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:53Z","lastTransitionTime":"2026-02-18T00:34:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.263975 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" event={"ID":"bb393ddf-cece-42f2-8d94-c88a3d536802","Type":"ContainerStarted","Data":"9485cfe0fe97814fa9ae31afb489a0e7eeea80a1ffb3501d473863a81ea8ed44"} Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.269131 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" event={"ID":"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1","Type":"ContainerStarted","Data":"ba5952d1567b2378fcb4d3efd1cd86066e08f9d177d4992385204d5702a8e467"} Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.269540 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.281831 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tr5hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:53Z 
is after 2025-08-24T17:21:41Z" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.296649 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb393ddf-cece-42f2-8d94-c88a3d536802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9485cfe0fe97814fa9ae31afb489a0e7eeea80a1ffb3501d473863a81ea8ed44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\
\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountP
ath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vnz85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:53Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.302270 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.309234 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:53Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.349717 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.349758 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.349768 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.349784 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.349796 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:53Z","lastTransitionTime":"2026-02-18T00:34:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.357348 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:53Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.381543 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:53Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.393407 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-d2kpn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bdb769-59eb-4472-ba08-be5897ee2cd6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hg6c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-d2kpn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:53Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.404825 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-cg5l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aa04878bf8d787366f7a951ae7bb9daf6c4bdc56d79b0e487d137fda064c277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpnzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-cg5l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:53Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.414089 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cq6jj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a5f72cfc5bc6d0d3ce9a9b929eb8f81b9044509da4761c2b0c0cb0c78aa812c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4mlx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:47Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cq6jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:53Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.431172 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973ad
d4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:53Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.489930 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:53Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.490463 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.490516 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.490528 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.490546 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.490559 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:53Z","lastTransitionTime":"2026-02-18T00:34:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.501114 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:53Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.512167 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:53Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.525429 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:53Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.536909 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:53Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.545294 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b31a333-8f95-459c-8135-e91e557c4c85\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ebd0f5d677f875f5c5fe2b00c19a8a133c0524373b75be57ae4d9e4159183bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bhfmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:53Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.557058 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb393ddf-cece-42f2-8d94-c88a3d536802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9485cfe0fe97814fa9ae31afb489a0e7eeea80a1ffb3501d473863a81ea8ed44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"
,\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceacc
ount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\"
:\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vnz85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:53Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.571846 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba5952d1567b2378fcb4d3efd1cd86066e08f9d1
77d4992385204d5702a8e467\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tr5hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:53Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.584491 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-d2kpn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bdb769-59eb-4472-ba08-be5897ee2cd6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hg6c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-d2kpn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:53Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.592478 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.592510 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.592520 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.592535 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.592546 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:53Z","lastTransitionTime":"2026-02-18T00:34:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.595070 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-cg5l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aa04878bf8d787366f7a951ae7bb9daf6c4bdc56d79b0e487d137fda064c277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpnzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.16
8.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-cg5l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:53Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.604968 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cq6jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a5f72cfc5bc6d0d3ce9a9b929eb8f81b9044509da4761c2b0c0cb0c78aa812c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4mlx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:47Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cq6jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:53Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.622443 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973ad
d4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:53Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.635697 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:53Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.648234 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:53Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.659169 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:53Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.671534 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:53Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.681073 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:53Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.693478 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:53Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.694276 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.694313 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.694326 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.694342 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.694353 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:53Z","lastTransitionTime":"2026-02-18T00:34:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.705806 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b31a333-8f95-459c-8135-e91e557c4c85\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ebd0f5d677f875f5c5fe2b00c19a8a133c0524373b75be57ae4d9e4159183bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bhfmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:53Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.720783 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e2
7753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed 
container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:53Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.737792 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:53Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.796850 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.797130 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.797139 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.797175 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.797195 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:53Z","lastTransitionTime":"2026-02-18T00:34:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.898911 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.898952 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.898961 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.898974 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:53 crc kubenswrapper[4791]: I0218 00:34:53.898981 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:53Z","lastTransitionTime":"2026-02-18T00:34:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.001478 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.001555 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.001581 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.001612 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.001635 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:54Z","lastTransitionTime":"2026-02-18T00:34:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.030675 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-20 15:58:35.962492619 +0000 UTC Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.103903 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.103977 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.103994 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.104016 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.104034 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:54Z","lastTransitionTime":"2026-02-18T00:34:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.206108 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.206222 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.206247 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.206274 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.206296 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:54Z","lastTransitionTime":"2026-02-18T00:34:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.272009 4791 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.272797 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.297559 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.308879 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.308940 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.308965 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.308992 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.309010 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:54Z","lastTransitionTime":"2026-02-18T00:34:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.309126 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-cg5l2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aa04878bf8d787366f7a951ae7bb9daf6c4bdc56d79b0e487d137fda064c277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpnzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-cg5l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:54Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.320531 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cq6jj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a5f72cfc5bc6d0d3ce9a9b929eb8f81b9044509da4761c2b0c0cb0c78aa812c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4mlx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:47Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cq6jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:54Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.343652 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973ad
d4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:54Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.357256 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:54Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.374207 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:54Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.390669 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:54Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.406213 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-d2kpn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bdb769-59eb-4472-ba08-be5897ee2cd6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hg6c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-d2kpn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:54Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.411379 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.411418 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.411429 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.411449 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.411462 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:54Z","lastTransitionTime":"2026-02-18T00:34:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.422376 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:54Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.438842 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:54Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.453366 4791 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b31a333-8f95-459c-8135-e91e557c4c85\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ebd0f5d677f875f5c5fe2b00c19a8a133c0524373b75be57ae4d9e4159183bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bhfmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:54Z is after 2025-08-24T17:21:41Z" Feb 18 
00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.468835 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\
":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:54Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.484229 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:54Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.501049 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:54Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.514432 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.514480 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.514492 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.514508 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.514520 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:54Z","lastTransitionTime":"2026-02-18T00:34:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.517629 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb393ddf-cece-42f2-8d94-c88a3d536802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9485cfe0fe97814fa9ae31afb489a0e7eeea80a1ffb3501d473863a81ea8ed44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vnz85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:54Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.539944 4791 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36c
dd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-con
troller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba5952d1567b2378fcb4d3efd1cd86066e08f9d177d4992385204d5702a8e467\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"k
ube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tr5hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:54Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.617399 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.617436 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.617455 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 
00:34:54.617470 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.617483 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:54Z","lastTransitionTime":"2026-02-18T00:34:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.720456 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.720518 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.720537 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.720563 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.720582 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:54Z","lastTransitionTime":"2026-02-18T00:34:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.821226 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.821384 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.821413 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.821443 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:34:54 crc 
kubenswrapper[4791]: E0218 00:34:54.821482 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:35:10.821448138 +0000 UTC m=+52.389461328 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.821548 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:34:54 crc kubenswrapper[4791]: E0218 00:34:54.821631 4791 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Feb 18 00:34:54 crc kubenswrapper[4791]: E0218 00:34:54.821655 4791 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 18 00:34:54 crc kubenswrapper[4791]: E0218 00:34:54.821677 4791 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 18 00:34:54 crc kubenswrapper[4791]: E0218 00:34:54.821692 4791 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 18 00:34:54 crc kubenswrapper[4791]: E0218 00:34:54.821714 4791 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 18 00:34:54 crc kubenswrapper[4791]: E0218 00:34:54.821732 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-02-18 00:35:10.821717577 +0000 UTC m=+52.389730957 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 18 00:34:54 crc kubenswrapper[4791]: E0218 00:34:54.821717 4791 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 18 00:34:54 crc kubenswrapper[4791]: E0218 00:34:54.821787 4791 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 18 00:34:54 crc kubenswrapper[4791]: E0218 00:34:54.821797 4791 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 18 00:34:54 crc kubenswrapper[4791]: E0218 00:34:54.821837 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-02-18 00:35:10.82182776 +0000 UTC m=+52.389841140 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 18 00:34:54 crc kubenswrapper[4791]: E0218 00:34:54.821932 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-18 00:35:10.821903222 +0000 UTC m=+52.389916482 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Feb 18 00:34:54 crc kubenswrapper[4791]: E0218 00:34:54.822359 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-18 00:35:10.822337176 +0000 UTC m=+52.390350546 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.822658 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.822704 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.822720 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.822746 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.822762 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:54Z","lastTransitionTime":"2026-02-18T00:34:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.925956 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.926008 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.926020 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.926040 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:54 crc kubenswrapper[4791]: I0218 00:34:54.926054 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:54Z","lastTransitionTime":"2026-02-18T00:34:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.029063 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.029116 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.029131 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.029171 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.029184 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:55Z","lastTransitionTime":"2026-02-18T00:34:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.031321 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-20 09:03:16.449733576 +0000 UTC Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.060789 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.060797 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.060945 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:34:55 crc kubenswrapper[4791]: E0218 00:34:55.061142 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:34:55 crc kubenswrapper[4791]: E0218 00:34:55.061583 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:34:55 crc kubenswrapper[4791]: E0218 00:34:55.061636 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.132229 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.132309 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.132352 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.132381 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.132478 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:55Z","lastTransitionTime":"2026-02-18T00:34:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.190715 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.190754 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.190762 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.190775 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.190785 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:55Z","lastTransitionTime":"2026-02-18T00:34:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:55 crc kubenswrapper[4791]: E0218 00:34:55.205330 4791 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"49ba9a08-9d87-4573-b809-fff0547601af\\\",\\\"systemUUID\\\":\\\"e1be6815-4e1d-4d7f-9918-bb66fef09f86\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:55Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.208760 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.208816 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.208829 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.208851 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.208869 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:55Z","lastTransitionTime":"2026-02-18T00:34:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:55 crc kubenswrapper[4791]: E0218 00:34:55.222358 4791 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"49ba9a08-9d87-4573-b809-fff0547601af\\\",\\\"systemUUID\\\":\\\"e1be6815-4e1d-4d7f-9918-bb66fef09f86\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:55Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.226787 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.226842 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.226856 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.226872 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.226882 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:55Z","lastTransitionTime":"2026-02-18T00:34:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:55 crc kubenswrapper[4791]: E0218 00:34:55.244135 4791 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"49ba9a08-9d87-4573-b809-fff0547601af\\\",\\\"systemUUID\\\":\\\"e1be6815-4e1d-4d7f-9918-bb66fef09f86\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:55Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.247979 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.248020 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.248029 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.248040 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.248048 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:55Z","lastTransitionTime":"2026-02-18T00:34:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:55 crc kubenswrapper[4791]: E0218 00:34:55.259202 4791 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"49ba9a08-9d87-4573-b809-fff0547601af\\\",\\\"systemUUID\\\":\\\"e1be6815-4e1d-4d7f-9918-bb66fef09f86\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:55Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.262751 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.262772 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.262783 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.262797 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.262809 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:55Z","lastTransitionTime":"2026-02-18T00:34:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.274299 4791 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Feb 18 00:34:55 crc kubenswrapper[4791]: E0218 00:34:55.276179 4791 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"49ba9a08-9d87-4573-b809-fff0547601af\\\",\\\"systemUUID\\\":\\\"e1be6815-4e1d-4d7f-9918-bb66fef09f86\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:55Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:55 crc kubenswrapper[4791]: E0218 00:34:55.276295 4791 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.277221 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.277253 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.277267 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.277282 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.277293 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:55Z","lastTransitionTime":"2026-02-18T00:34:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.380408 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.380472 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.380495 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.380521 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.380543 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:55Z","lastTransitionTime":"2026-02-18T00:34:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.483083 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.483145 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.483190 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.483219 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.483238 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:55Z","lastTransitionTime":"2026-02-18T00:34:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.585969 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.586011 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.586021 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.586037 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.586046 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:55Z","lastTransitionTime":"2026-02-18T00:34:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.688807 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.688869 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.688880 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.688896 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.688908 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:55Z","lastTransitionTime":"2026-02-18T00:34:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.791986 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.792044 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.792064 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.792088 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.792105 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:55Z","lastTransitionTime":"2026-02-18T00:34:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.895256 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.895298 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.895310 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.895327 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.895340 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:55Z","lastTransitionTime":"2026-02-18T00:34:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.998689 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.998754 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.998774 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.998798 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:55 crc kubenswrapper[4791]: I0218 00:34:55.998815 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:55Z","lastTransitionTime":"2026-02-18T00:34:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.032483 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-09 06:51:25.399432009 +0000 UTC Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.100940 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.101000 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.101017 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.101041 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.101064 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:56Z","lastTransitionTime":"2026-02-18T00:34:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.203599 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.203645 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.203654 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.203668 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.203678 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:56Z","lastTransitionTime":"2026-02-18T00:34:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.278673 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tr5hg_3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1/ovnkube-controller/0.log" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.282224 4791 generic.go:334] "Generic (PLEG): container finished" podID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerID="ba5952d1567b2378fcb4d3efd1cd86066e08f9d177d4992385204d5702a8e467" exitCode=1 Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.282261 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" event={"ID":"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1","Type":"ContainerDied","Data":"ba5952d1567b2378fcb4d3efd1cd86066e08f9d177d4992385204d5702a8e467"} Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.283027 4791 scope.go:117] "RemoveContainer" containerID="ba5952d1567b2378fcb4d3efd1cd86066e08f9d177d4992385204d5702a8e467" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.298969 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:56Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.305679 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.305766 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.305785 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.305808 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.305824 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:56Z","lastTransitionTime":"2026-02-18T00:34:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.323428 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:56Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.343547 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:56Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.362690 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b31a333-8f95-459c-8135-e91e557c4c85\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ebd0f5d677f875f5c5fe2b00c19a8a133c0524373b75be57ae4d9e4159183bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bhfmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:56Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.387888 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb393ddf-cece-42f2-8d94-c88a3d536802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9485cfe0fe97814fa9ae31afb489a0e7eeea80a1ffb3501d473863a81ea8ed44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"
,\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceacc
ount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\"
:\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vnz85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:56Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.409372 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.409424 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.409441 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.409464 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.409481 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:56Z","lastTransitionTime":"2026-02-18T00:34:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.422579 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba5952d1567b2378fcb4d3efd1cd86066e08f9d177d4992385204d5702a8e467\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba5952d1567b2378fcb4d3efd1cd86066e08f9d177d4992385204d5702a8e467\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"message\\\":\\\"Policy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0218 00:34:55.494886 6101 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0218 00:34:55.494896 6101 handler.go:208] Removed *v1.Node event handler 7\\\\nI0218 00:34:55.494904 6101 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0218 00:34:55.495049 6101 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0218 00:34:55.495383 6101 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0218 00:34:55.495512 6101 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0218 00:34:55.495656 6101 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0218 00:34:55.495782 6101 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0218 00:34:55.496101 6101 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0218 00:34:55.496121 6101 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0218 00:34:55.496168 6101 factory.go:656] Stopping watch factory\\\\nI0218 00:34:55.496186 6101 ovnkube.go:599] Stopped ovnkube\\\\nI0218 00:34:55.496209 6101 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0218 
00\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tr5hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:56Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.448424 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cq6jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a5f72cfc5bc6d0d3ce9a9b929eb8f81b9044509da4761c2b0c0cb0c78aa812c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4mlx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.12
6.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:47Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cq6jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:56Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.491878 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f743
8e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973add4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:56Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.515731 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.515789 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.515817 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.515847 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.515871 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:56Z","lastTransitionTime":"2026-02-18T00:34:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.520803 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:56Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.538004 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:56Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.558345 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:56Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.574959 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-d2kpn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bdb769-59eb-4472-ba08-be5897ee2cd6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\"
,\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hg6c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-d2kpn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:56Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.588193 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-cg5l2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aa04878bf8d787366f7a951ae7bb9daf6c4bdc56d79b0e487d137fda064c277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpnzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-cg5l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:56Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.600637 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:56Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.614244 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:56Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.618362 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.618395 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.618404 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.618417 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.618426 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:56Z","lastTransitionTime":"2026-02-18T00:34:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.764529 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.764556 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.764564 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.764577 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.764586 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:56Z","lastTransitionTime":"2026-02-18T00:34:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.866733 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.867079 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.867089 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.867102 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.867111 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:56Z","lastTransitionTime":"2026-02-18T00:34:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.978204 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.978278 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.978298 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.978326 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:56 crc kubenswrapper[4791]: I0218 00:34:56.978345 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:56Z","lastTransitionTime":"2026-02-18T00:34:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.033267 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 19:46:22.710580704 +0000 UTC Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.061060 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.061093 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:34:57 crc kubenswrapper[4791]: E0218 00:34:57.061202 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.061231 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:34:57 crc kubenswrapper[4791]: E0218 00:34:57.061340 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:34:57 crc kubenswrapper[4791]: E0218 00:34:57.061462 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.061922 4791 scope.go:117] "RemoveContainer" containerID="05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.080590 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.080619 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.080628 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.080640 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.080649 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:57Z","lastTransitionTime":"2026-02-18T00:34:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.183179 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.183201 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.183210 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.183223 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.183231 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:57Z","lastTransitionTime":"2026-02-18T00:34:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.284984 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.285026 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.285042 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.285065 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.285080 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:57Z","lastTransitionTime":"2026-02-18T00:34:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.286936 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.288518 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"e5c41479d5089b48a439d4ea96fb7d33c0512e1c8a5d0c2c845eeda0eec6e789"} Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.288739 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.290100 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tr5hg_3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1/ovnkube-controller/1.log" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.291287 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tr5hg_3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1/ovnkube-controller/0.log" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.293380 4791 generic.go:334] "Generic (PLEG): container finished" podID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerID="cedeb29806a5a6a1c21181d34fc0ec414201c0fd58f89d94156ff0546de09206" exitCode=1 Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.293420 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" event={"ID":"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1","Type":"ContainerDied","Data":"cedeb29806a5a6a1c21181d34fc0ec414201c0fd58f89d94156ff0546de09206"} Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.293446 4791 scope.go:117] "RemoveContainer" containerID="ba5952d1567b2378fcb4d3efd1cd86066e08f9d177d4992385204d5702a8e467" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.293956 4791 scope.go:117] "RemoveContainer" containerID="cedeb29806a5a6a1c21181d34fc0ec414201c0fd58f89d94156ff0546de09206" Feb 18 00:34:57 crc kubenswrapper[4791]: E0218 00:34:57.294074 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-tr5hg_openshift-ovn-kubernetes(3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1)\"" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.304690 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:57Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.317724 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:57Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.320086 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.330755 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:57Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.342752 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:57Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.351486 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b31a333-8f95-459c-8135-e91e557c4c85\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ebd0f5d677f875f5c5fe2b00c19a8a133c0524373b75be57ae4d9e4159183bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bhfmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:57Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.363674 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5c41479d5089b48a439d4ea96fb7d33c0512e1c8a5d0c2c845eeda0eec6e789\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:57Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.383827 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb393ddf-cece-42f2-8d94-c88a3d536802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9485cfe0fe97814fa9ae31afb489a0e7eeea80a1ffb3501d473863a81ea8ed44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vnz85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:57Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.388022 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.388056 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:57 crc 
kubenswrapper[4791]: I0218 00:34:57.388065 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.388079 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.388089 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:57Z","lastTransitionTime":"2026-02-18T00:34:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.400724 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba5952d1567b2378fcb4d3efd1cd86066e08f9d1
77d4992385204d5702a8e467\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba5952d1567b2378fcb4d3efd1cd86066e08f9d177d4992385204d5702a8e467\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"message\\\":\\\"Policy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0218 00:34:55.494886 6101 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0218 00:34:55.494896 6101 handler.go:208] Removed *v1.Node event handler 7\\\\nI0218 00:34:55.494904 6101 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0218 00:34:55.495049 6101 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0218 00:34:55.495383 6101 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0218 00:34:55.495512 6101 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0218 00:34:55.495656 6101 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0218 00:34:55.495782 6101 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0218 00:34:55.496101 6101 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0218 00:34:55.496121 6101 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0218 00:34:55.496168 6101 factory.go:656] Stopping watch factory\\\\nI0218 00:34:55.496186 6101 ovnkube.go:599] Stopped ovnkube\\\\nI0218 00:34:55.496209 6101 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0218 
00\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tr5hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:57Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.412817 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:57Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.428417 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:57Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.441880 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-d2kpn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bdb769-59eb-4472-ba08-be5897ee2cd6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni
/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hg6c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-d2kpn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:57Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.454403 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-cg5l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aa04878bf8d787366f7a951ae7bb9daf6c4bdc56d79b0e487d137fda064c277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpnzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-cg5l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:57Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.465639 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cq6jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a5f72cfc5bc6d0d3ce9a9b929eb8f81b9044509da4761c2b0c0cb0c78aa812c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4mlx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:47Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cq6jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:57Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.487078 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973ad
d4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:57Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.490416 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.490460 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.490474 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.490495 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.490509 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:57Z","lastTransitionTime":"2026-02-18T00:34:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.502385 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:57Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.517060 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:57Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.530963 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:57Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.550296 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b31a333-8f95-459c-8135-e91e557c4c85\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ebd0f5d677f875f5c5fe2b00c19a8a133c0524373b75be57ae4d9e4159183bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bhfmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:57Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.570732 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5c41479d5089b48a439d4ea96fb7d33c0512e1c8a5d0c2c845eeda0eec6e789\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:57Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.584260 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb393ddf-cece-42f2-8d94-c88a3d536802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9485cfe0fe97814fa9ae31afb489a0e7eeea80a1ffb3501d473863a81ea8ed44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vnz85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:57Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.593317 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.593357 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:57 crc 
kubenswrapper[4791]: I0218 00:34:57.593370 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.593386 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.593397 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:57Z","lastTransitionTime":"2026-02-18T00:34:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.607767 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cedeb29806a5a6a1c21181d34fc0ec414201c0fd
58f89d94156ff0546de09206\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba5952d1567b2378fcb4d3efd1cd86066e08f9d177d4992385204d5702a8e467\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-18T00:34:55Z\\\",\\\"message\\\":\\\"Policy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0218 00:34:55.494886 6101 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0218 00:34:55.494896 6101 handler.go:208] Removed *v1.Node event handler 7\\\\nI0218 00:34:55.494904 6101 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0218 00:34:55.495049 6101 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0218 00:34:55.495383 6101 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0218 00:34:55.495512 6101 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0218 00:34:55.495656 6101 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0218 00:34:55.495782 6101 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0218 00:34:55.496101 6101 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0218 00:34:55.496121 6101 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0218 00:34:55.496168 6101 factory.go:656] Stopping watch factory\\\\nI0218 00:34:55.496186 6101 ovnkube.go:599] Stopped ovnkube\\\\nI0218 00:34:55.496209 6101 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0218 00\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:52Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cedeb29806a5a6a1c21181d34fc0ec414201c0fd58f89d94156ff0546de09206\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-18T00:34:57Z\\\",\\\"message\\\":\\\"ry.go:140\\\\nI0218 00:34:57.145594 6231 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0218 00:34:57.145711 6231 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI0218 00:34:57.145882 6231 reflector.go:311] Stopping reflector *v1.UserDefinedNetwork (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140\\\\nI0218 00:34:57.146212 6231 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0218 00:34:57.146249 6231 factory.go:656] Stopping watch factory\\\\nI0218 00:34:57.146268 6231 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0218 00:34:57.169438 6231 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI0218 00:34:57.169465 6231 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI0218 00:34:57.169532 6231 ovnkube.go:599] Stopped 
ovnkube\\\\nI0218 00:34:57.169558 6231 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0218 00:34:57.169637 6231 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"conta
inerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tr5hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:57Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.627455 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:57Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.642228 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:57Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.663652 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-d2kpn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bdb769-59eb-4472-ba08-be5897ee2cd6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni
/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hg6c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-d2kpn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:57Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.676418 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-cg5l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aa04878bf8d787366f7a951ae7bb9daf6c4bdc56d79b0e487d137fda064c277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpnzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-cg5l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:57Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.691694 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cq6jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a5f72cfc5bc6d0d3ce9a9b929eb8f81b9044509da4761c2b0c0cb0c78aa812c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4mlx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:47Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cq6jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:57Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.698218 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.698256 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.698265 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.698279 4791 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.698289 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:57Z","lastTransitionTime":"2026-02-18T00:34:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.725571 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100
dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973add4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\
\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:57Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.742032 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:57Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.754200 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:57Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.766993 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:57Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.800212 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.800245 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.800255 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.800268 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.800278 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:57Z","lastTransitionTime":"2026-02-18T00:34:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.902954 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.902984 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.902992 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.903007 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:57 crc kubenswrapper[4791]: I0218 00:34:57.903016 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:57Z","lastTransitionTime":"2026-02-18T00:34:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.006062 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.006106 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.006117 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.006134 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.006145 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:58Z","lastTransitionTime":"2026-02-18T00:34:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.034359 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-25 23:07:22.545543959 +0000 UTC Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.108662 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.108698 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.108707 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.108721 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.108732 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:58Z","lastTransitionTime":"2026-02-18T00:34:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.211088 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.211114 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.211123 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.211137 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.211145 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:58Z","lastTransitionTime":"2026-02-18T00:34:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.312517 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tr5hg_3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1/ovnkube-controller/1.log" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.312791 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.312827 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.312836 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.312852 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.312862 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:58Z","lastTransitionTime":"2026-02-18T00:34:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.317383 4791 scope.go:117] "RemoveContainer" containerID="cedeb29806a5a6a1c21181d34fc0ec414201c0fd58f89d94156ff0546de09206" Feb 18 00:34:58 crc kubenswrapper[4791]: E0218 00:34:58.317522 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-tr5hg_openshift-ovn-kubernetes(3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1)\"" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.337676 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb393ddf-cece-42f2-8d94-c88a3d536802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9485cfe0fe97814fa9ae31afb489a0e7eeea80a1ffb3501d473863a81ea8ed44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vnz85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:58Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.364442 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cedeb29806a5a6a1c21181d34fc0ec414201c0fd58f89d94156ff0546de09206\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cedeb29806a5a6a1c21181d34fc0ec414201c0fd58f89d94156ff0546de09206\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-18T00:34:57Z\\\",\\\"message\\\":\\\"ry.go:140\\\\nI0218 00:34:57.145594 6231 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0218 00:34:57.145711 6231 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI0218 00:34:57.145882 6231 reflector.go:311] Stopping reflector *v1.UserDefinedNetwork (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140\\\\nI0218 00:34:57.146212 6231 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0218 00:34:57.146249 6231 factory.go:656] Stopping watch factory\\\\nI0218 00:34:57.146268 6231 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0218 00:34:57.169438 6231 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI0218 00:34:57.169465 6231 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI0218 00:34:57.169532 6231 ovnkube.go:599] Stopped ovnkube\\\\nI0218 00:34:57.169558 6231 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0218 00:34:57.169637 6231 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-tr5hg_openshift-ovn-kubernetes(3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tr5hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:58Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.394941 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5c
b5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973add4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:58Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.415246 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:58Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.415407 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.415439 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.415450 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.415465 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.415476 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:58Z","lastTransitionTime":"2026-02-18T00:34:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.434103 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:58Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.451451 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:58Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.467977 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-d2kpn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bdb769-59eb-4472-ba08-be5897ee2cd6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hg6c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-d2kpn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:58Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.480242 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-cg5l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aa04878bf8d787366f7a951ae7bb9daf6c4bdc56d79b0e487d137fda064c277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpnzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-cg5l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:58Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.492977 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cq6jj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a5f72cfc5bc6d0d3ce9a9b929eb8f81b9044509da4761c2b0c0cb0c78aa812c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4mlx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:47Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cq6jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:58Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.504494 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:58Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.515052 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:58Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.518434 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.518472 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.518486 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.518507 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.518548 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:58Z","lastTransitionTime":"2026-02-18T00:34:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.534663 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5c41479d5089b48a439d4ea96fb7d33c0512e1c8a5d0c2c845eeda0eec6e789\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:58Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.550661 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:58Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.562219 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:58Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.575094 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b31a333-8f95-459c-8135-e91e557c4c85\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ebd0f5d677f875f5c5fe2b00c19a8a133c0524373b75be57ae4d9e4159183bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bhfmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:58Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.621242 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.621318 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.621329 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.621347 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.621359 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:58Z","lastTransitionTime":"2026-02-18T00:34:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.680604 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r"] Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.681145 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.683392 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.683805 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.702025 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb393ddf-cece-42f2-8d94-c88a3d536802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9485cfe0fe97814fa9ae31afb489a0e7eeea80a1ffb3501d473863a81ea8ed44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"n
ame\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\
\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vnz85\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:58Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.719428 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics
-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabl
ed\\\"}]},{\\\"containerID\\\":\\\"cri-o://4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cedeb29806a5a6a1c21181d34fc0ec414201c0fd58f89d94156ff0546de09206\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cedeb29806a5a6a1c21181d34fc0ec414201c0fd58f89d94156ff0546de09206\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-18T00:34:57Z\\\",\\\"message\\\":\\\"ry.go:140\\\\nI0218 00:34:57.145594 6231 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0218 00:34:57.145711 6231 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI0218 00:34:57.145882 6231 reflector.go:311] Stopping reflector *v1.UserDefinedNetwork (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140\\\\nI0218 00:34:57.146212 6231 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0218 00:34:57.146249 6231 factory.go:656] Stopping watch factory\\\\nI0218 00:34:57.146268 6231 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0218 00:34:57.169438 6231 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI0218 00:34:57.169465 6231 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI0218 00:34:57.169532 6231 ovnkube.go:599] Stopped ovnkube\\\\nI0218 00:34:57.169558 6231 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0218 00:34:57.169637 6231 ovnkube.go:137] failed to run 
ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-tr5hg_openshift-ovn-kubernetes(3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveR
eadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tr5hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:58Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.723963 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.724050 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.724067 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.724089 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.724104 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:58Z","lastTransitionTime":"2026-02-18T00:34:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.735566 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02f6ba8d-1be3-44e1-b1bf-21c188e59802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:58Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7zq7r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: 
current time 2026-02-18T00:34:58Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.756795 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeM
ounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973add4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\
"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:58Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.770546 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:58Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.781727 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/02f6ba8d-1be3-44e1-b1bf-21c188e59802-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-7zq7r\" (UID: \"02f6ba8d-1be3-44e1-b1bf-21c188e59802\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.781790 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/02f6ba8d-1be3-44e1-b1bf-21c188e59802-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-7zq7r\" (UID: \"02f6ba8d-1be3-44e1-b1bf-21c188e59802\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.781833 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c4sc7\" (UniqueName: \"kubernetes.io/projected/02f6ba8d-1be3-44e1-b1bf-21c188e59802-kube-api-access-c4sc7\") pod \"ovnkube-control-plane-749d76644c-7zq7r\" (UID: \"02f6ba8d-1be3-44e1-b1bf-21c188e59802\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.781912 4791 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/02f6ba8d-1be3-44e1-b1bf-21c188e59802-env-overrides\") pod \"ovnkube-control-plane-749d76644c-7zq7r\" (UID: \"02f6ba8d-1be3-44e1-b1bf-21c188e59802\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.788775 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:58Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.806054 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:58Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.819293 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-d2kpn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bdb769-59eb-4472-ba08-be5897ee2cd6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni
/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hg6c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-d2kpn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:58Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.826672 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.826723 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.826736 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.826752 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.826763 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:58Z","lastTransitionTime":"2026-02-18T00:34:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.831852 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-cg5l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aa04878bf8d787366f7a951ae7bb9daf6c4bdc56d79b0e487d137fda064c277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpnzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-cg5l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:58Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.844556 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cq6jj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a5f72cfc5bc6d0d3ce9a9b929eb8f81b9044509da4761c2b0c0cb0c78aa812c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4mlx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:47Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cq6jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:58Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.856575 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:58Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.868138 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:58Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.882677 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/02f6ba8d-1be3-44e1-b1bf-21c188e59802-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-7zq7r\" (UID: \"02f6ba8d-1be3-44e1-b1bf-21c188e59802\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.882722 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/02f6ba8d-1be3-44e1-b1bf-21c188e59802-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-7zq7r\" (UID: \"02f6ba8d-1be3-44e1-b1bf-21c188e59802\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.882745 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c4sc7\" (UniqueName: \"kubernetes.io/projected/02f6ba8d-1be3-44e1-b1bf-21c188e59802-kube-api-access-c4sc7\") pod \"ovnkube-control-plane-749d76644c-7zq7r\" (UID: \"02f6ba8d-1be3-44e1-b1bf-21c188e59802\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.882793 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/02f6ba8d-1be3-44e1-b1bf-21c188e59802-env-overrides\") pod \"ovnkube-control-plane-749d76644c-7zq7r\" (UID: \"02f6ba8d-1be3-44e1-b1bf-21c188e59802\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.883108 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5c41479d5089b48a439d4ea96fb7d33c0512e1c8a5d0c2c845eeda0eec6e789\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:58Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.883454 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/02f6ba8d-1be3-44e1-b1bf-21c188e59802-env-overrides\") pod \"ovnkube-control-plane-749d76644c-7zq7r\" (UID: \"02f6ba8d-1be3-44e1-b1bf-21c188e59802\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.884039 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/02f6ba8d-1be3-44e1-b1bf-21c188e59802-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-7zq7r\" (UID: \"02f6ba8d-1be3-44e1-b1bf-21c188e59802\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r" Feb 18 00:34:58 crc 
kubenswrapper[4791]: I0218 00:34:58.893488 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/02f6ba8d-1be3-44e1-b1bf-21c188e59802-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-7zq7r\" (UID: \"02f6ba8d-1be3-44e1-b1bf-21c188e59802\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.901185 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0
bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:58Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.906880 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c4sc7\" (UniqueName: \"kubernetes.io/projected/02f6ba8d-1be3-44e1-b1bf-21c188e59802-kube-api-access-c4sc7\") pod \"ovnkube-control-plane-749d76644c-7zq7r\" (UID: \"02f6ba8d-1be3-44e1-b1bf-21c188e59802\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.921362 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:58Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.929823 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.929874 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.929888 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.929909 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.929923 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:58Z","lastTransitionTime":"2026-02-18T00:34:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.934791 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b31a333-8f95-459c-8135-e91e557c4c85\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ebd0f5d677f875f5c5fe2b00c19a8a133c0524373b75be57ae4d9e4159183bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bhfmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:58Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:58 crc kubenswrapper[4791]: I0218 00:34:58.999716 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r" Feb 18 00:34:59 crc kubenswrapper[4791]: W0218 00:34:59.017754 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod02f6ba8d_1be3_44e1_b1bf_21c188e59802.slice/crio-8e7bb54829552c0b688a0f64ef89ae8d9252774471a2de7ee4cf2c02b7470c49 WatchSource:0}: Error finding container 8e7bb54829552c0b688a0f64ef89ae8d9252774471a2de7ee4cf2c02b7470c49: Status 404 returned error can't find the container with id 8e7bb54829552c0b688a0f64ef89ae8d9252774471a2de7ee4cf2c02b7470c49 Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.032797 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.032835 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.032846 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.032863 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.032878 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:59Z","lastTransitionTime":"2026-02-18T00:34:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.035223 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-29 23:56:58.367684636 +0000 UTC Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.060504 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.060616 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:34:59 crc kubenswrapper[4791]: E0218 00:34:59.060739 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.060847 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:34:59 crc kubenswrapper[4791]: E0218 00:34:59.060928 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:34:59 crc kubenswrapper[4791]: E0218 00:34:59.061027 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.081891 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb393ddf-cece-42f2-8d94-c88a3d536802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9485cfe0fe97814fa9ae31afb489a0e7eeea80a1ffb3501d473863a81ea8ed44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7
708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/se
rviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mount
Path\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vnz85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:59Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.105994 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cedeb29806a5a6a1c21181d34fc0ec414201c0fd
58f89d94156ff0546de09206\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cedeb29806a5a6a1c21181d34fc0ec414201c0fd58f89d94156ff0546de09206\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-18T00:34:57Z\\\",\\\"message\\\":\\\"ry.go:140\\\\nI0218 00:34:57.145594 6231 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0218 00:34:57.145711 6231 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI0218 00:34:57.145882 6231 reflector.go:311] Stopping reflector *v1.UserDefinedNetwork (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140\\\\nI0218 00:34:57.146212 6231 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0218 00:34:57.146249 6231 factory.go:656] Stopping watch factory\\\\nI0218 00:34:57.146268 6231 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0218 00:34:57.169438 6231 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI0218 00:34:57.169465 6231 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI0218 00:34:57.169532 6231 ovnkube.go:599] Stopped ovnkube\\\\nI0218 00:34:57.169558 6231 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0218 00:34:57.169637 6231 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-tr5hg_openshift-ovn-kubernetes(3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tr5hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:59Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.123576 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:59Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.136184 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.136231 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.136246 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.136266 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.136281 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:59Z","lastTransitionTime":"2026-02-18T00:34:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.136406 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-d2kpn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bdb769-59eb-4472-ba08-be5897ee2cd6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hg6c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-d2kpn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:59Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.150389 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-cg5l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aa04878bf8d787366f7a951ae7bb9daf6c4bdc56d79b0e487d137fda064c277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpnzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-cg5l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:59Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.161367 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cq6jj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a5f72cfc5bc6d0d3ce9a9b929eb8f81b9044509da4761c2b0c0cb0c78aa812c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4mlx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:47Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cq6jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:59Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.173262 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02f6ba8d-1be3-44e1-b1bf-21c188e59802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:58Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:58Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:58Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7zq7r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:59Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.198440 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973ad
d4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:59Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.213208 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:59Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.227406 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:59Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.238497 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.238540 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.238551 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.238566 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.238577 4791 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:59Z","lastTransitionTime":"2026-02-18T00:34:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.242209 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:59Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.259772 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:59Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.277940 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:59Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.295077 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b31a333-8f95-459c-8135-e91e557c4c85\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ebd0f5d677f875f5c5fe2b00c19a8a133c0524373b75be57ae4d9e4159183bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bhfmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:59Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.311794 4791 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5c41479d5089b48a439d4ea96fb7d33c0512e1c8a5d0c2c845eeda0eec6e789\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:59Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.328073 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r" event={"ID":"02f6ba8d-1be3-44e1-b1bf-21c188e59802","Type":"ContainerStarted","Data":"aa464b32325197259edb9689c1d43c14a3f3144080177d3d33eb5ea4b79a3462"} Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.328129 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r" event={"ID":"02f6ba8d-1be3-44e1-b1bf-21c188e59802","Type":"ContainerStarted","Data":"8e7bb54829552c0b688a0f64ef89ae8d9252774471a2de7ee4cf2c02b7470c49"} Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.329438 4791 scope.go:117] "RemoveContainer" 
containerID="cedeb29806a5a6a1c21181d34fc0ec414201c0fd58f89d94156ff0546de09206" Feb 18 00:34:59 crc kubenswrapper[4791]: E0218 00:34:59.329704 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-tr5hg_openshift-ovn-kubernetes(3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1)\"" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.333602 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crc
ont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:34:59Z is after 2025-08-24T17:21:41Z" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.343051 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.343148 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.343487 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.343759 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.344131 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:59Z","lastTransitionTime":"2026-02-18T00:34:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.447299 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.447351 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.447365 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.447416 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.447428 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:59Z","lastTransitionTime":"2026-02-18T00:34:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.550503 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.550583 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.550603 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.550629 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.550647 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:59Z","lastTransitionTime":"2026-02-18T00:34:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.652679 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.652721 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.652736 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.652752 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.652763 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:59Z","lastTransitionTime":"2026-02-18T00:34:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.755655 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.755729 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.755746 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.755771 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.755789 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:59Z","lastTransitionTime":"2026-02-18T00:34:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.858919 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.858991 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.859010 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.859034 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.859076 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:59Z","lastTransitionTime":"2026-02-18T00:34:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.962135 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.962232 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.962250 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.962273 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:34:59 crc kubenswrapper[4791]: I0218 00:34:59.962291 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:34:59Z","lastTransitionTime":"2026-02-18T00:34:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.036127 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-28 16:41:39.078750799 +0000 UTC Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.066092 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.066231 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.066261 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.066292 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.066316 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:00Z","lastTransitionTime":"2026-02-18T00:35:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.169618 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.169703 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.169727 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.169757 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.169788 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:00Z","lastTransitionTime":"2026-02-18T00:35:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.273850 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.273911 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.273928 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.273954 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.273971 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:00Z","lastTransitionTime":"2026-02-18T00:35:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.335407 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r" event={"ID":"02f6ba8d-1be3-44e1-b1bf-21c188e59802","Type":"ContainerStarted","Data":"eee7f98bfe1c0ed68a17d92c77a0a84f087691e6b789f13cb0c8f14de244f009"} Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.358797 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:00Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.378108 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.378246 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.378567 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.378850 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.379425 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:00Z","lastTransitionTime":"2026-02-18T00:35:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.380610 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:00Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.403687 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5c41479d5089b48a439d4ea96fb7d33c0512e1c8a5d0c2c845eeda0eec6e789\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:00Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.425014 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:00Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.444127 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:00Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.462112 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b31a333-8f95-459c-8135-e91e557c4c85\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ebd0f5d677f875f5c5fe2b00c19a8a133c0524373b75be57ae4d9e4159183bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bhfmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:00Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.482381 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.482432 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.482454 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.482481 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.482500 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:00Z","lastTransitionTime":"2026-02-18T00:35:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.482723 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb393ddf-cece-42f2-8d94-c88a3d536802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9485cfe0fe97814fa9ae31afb489a0e7eeea80a1ffb3501d473863a81ea8ed44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vnz85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:00Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.512803 4791 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36c
dd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-con
troller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cedeb29806a5a6a1c21181d34fc0ec414201c0fd58f89d94156ff0546de09206\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cedeb29806a5a6a1c21181d34fc0ec414201c0fd58f89d94156ff0546de09206\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-18T00:34:57Z\\\",\\\"message\\\":\\\"ry.go:140\\\\nI0218 00:34:57.145594 6231 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0218 00:34:57.145711 6231 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI0218 00:34:57.145882 6231 reflector.go:311] Stopping reflector *v1.UserDefinedNetwork (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140\\\\nI0218 00:34:57.146212 6231 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0218 00:34:57.146249 6231 factory.go:656] Stopping watch factory\\\\nI0218 00:34:57.146268 6231 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0218 00:34:57.169438 6231 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI0218 00:34:57.169465 6231 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI0218 00:34:57.169532 6231 ovnkube.go:599] Stopped ovnkube\\\\nI0218 00:34:57.169558 6231 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0218 00:34:57.169637 6231 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-tr5hg_openshift-ovn-kubernetes(3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tr5hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:00Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.537608 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5c
b5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973add4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:00Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.553318 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:00Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.565225 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-jq75l"] Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.565977 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:00 crc kubenswrapper[4791]: E0218 00:35:00.566077 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.569064 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:00Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.586249 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.586309 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.586326 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.586354 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.586376 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:00Z","lastTransitionTime":"2026-02-18T00:35:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network 
plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.589691 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:00Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.602357 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/afcf9ee1-4224-441c-a98d-9330bed34065-metrics-certs\") pod \"network-metrics-daemon-jq75l\" (UID: \"afcf9ee1-4224-441c-a98d-9330bed34065\") " pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.602461 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-55h6k\" (UniqueName: \"kubernetes.io/projected/afcf9ee1-4224-441c-a98d-9330bed34065-kube-api-access-55h6k\") pod \"network-metrics-daemon-jq75l\" (UID: \"afcf9ee1-4224-441c-a98d-9330bed34065\") " pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.605219 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-d2kpn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bdb769-59eb-4472-ba08-be5897ee2cd6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mount
Path\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hg6c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-d2kpn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:00Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.616338 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-cg5l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aa04878bf8d787366f7a951ae7bb9daf6c4bdc56d79b0e487d137fda064c277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpnzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-cg5l2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:00Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.625255 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cq6jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a5f72cfc5bc6d0d3ce9a9b929eb8f81b9044509da4761c2b0c0cb0c78aa812c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4mlx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:47Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cq6jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:00Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.635339 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"02f6ba8d-1be3-44e1-b1bf-21c188e59802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa464b32325197259edb9689c1d43c14a3f3144080177d3d33eb5ea4b79a3462\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eee7f98bfe1c0ed68a17d92c77a0a84f087691e6b789f13cb0c8f14de244f009\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7zq7r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:00Z is after 2025-08-24T17:21:41Z" Feb 18 
00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.647514 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:00Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.661555 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:00Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.675332 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:00Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.689831 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.689878 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.689932 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.689953 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.689966 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:00Z","lastTransitionTime":"2026-02-18T00:35:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.692818 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-d2kpn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bdb769-59eb-4472-ba08-be5897ee2cd6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hg6c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-d2kpn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:00Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.704017 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/afcf9ee1-4224-441c-a98d-9330bed34065-metrics-certs\") pod \"network-metrics-daemon-jq75l\" (UID: \"afcf9ee1-4224-441c-a98d-9330bed34065\") " pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.704145 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-55h6k\" (UniqueName: \"kubernetes.io/projected/afcf9ee1-4224-441c-a98d-9330bed34065-kube-api-access-55h6k\") pod \"network-metrics-daemon-jq75l\" (UID: \"afcf9ee1-4224-441c-a98d-9330bed34065\") " pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:00 crc kubenswrapper[4791]: E0218 00:35:00.704288 4791 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Feb 18 00:35:00 crc kubenswrapper[4791]: E0218 00:35:00.704376 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/afcf9ee1-4224-441c-a98d-9330bed34065-metrics-certs podName:afcf9ee1-4224-441c-a98d-9330bed34065 nodeName:}" failed. No retries permitted until 2026-02-18 00:35:01.204356 +0000 UTC m=+42.772369180 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/afcf9ee1-4224-441c-a98d-9330bed34065-metrics-certs") pod "network-metrics-daemon-jq75l" (UID: "afcf9ee1-4224-441c-a98d-9330bed34065") : object "openshift-multus"/"metrics-daemon-secret" not registered Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.704997 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-cg5l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aa04878bf8d787366f7a951ae7bb9daf6c4bdc56d79b0e487d137fda064c277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpnzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-cg5l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:00Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.716381 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cq6jj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a5f72cfc5bc6d0d3ce9a9b929eb8f81b9044509da4761c2b0c0cb0c78aa812c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4mlx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:47Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cq6jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:00Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.718958 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-55h6k\" (UniqueName: \"kubernetes.io/projected/afcf9ee1-4224-441c-a98d-9330bed34065-kube-api-access-55h6k\") pod \"network-metrics-daemon-jq75l\" (UID: \"afcf9ee1-4224-441c-a98d-9330bed34065\") " pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.730041 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"02f6ba8d-1be3-44e1-b1bf-21c188e59802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa464b32325197259edb9689c1d43c14a3f3144080177d3d33eb5ea4b79a3462\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eee7f98bfe1c0ed68a17d92c77a0a84f087691e6b789f13cb0c8f14de244f009\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7zq7r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:00Z is after 2025-08-24T17:21:41Z" Feb 18 
00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.748683 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"lo
g-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973add4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reas
on\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:00Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.760935 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:00Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.775144 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:00Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.789890 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5c41479d5089b48a439d4ea96fb7d33c0512e1c8a5d0c2c845eeda0eec6e789\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:00Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.792812 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.792881 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.792907 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.792940 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.792958 4791 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:00Z","lastTransitionTime":"2026-02-18T00:35:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.803851 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:00Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.818747 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:00Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.833644 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b31a333-8f95-459c-8135-e91e557c4c85\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ebd0f5d677f875f5c5fe2b00c19a8a133c0524373b75be57ae4d9e4159183bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bhfmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:00Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.849896 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jq75l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afcf9ee1-4224-441c-a98d-9330bed34065\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55h6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55h6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:35:00Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jq75l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:00Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.870868 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cedeb29806a5a6a1c21181d34fc0ec414201c0fd
58f89d94156ff0546de09206\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cedeb29806a5a6a1c21181d34fc0ec414201c0fd58f89d94156ff0546de09206\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-18T00:34:57Z\\\",\\\"message\\\":\\\"ry.go:140\\\\nI0218 00:34:57.145594 6231 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0218 00:34:57.145711 6231 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI0218 00:34:57.145882 6231 reflector.go:311] Stopping reflector *v1.UserDefinedNetwork (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140\\\\nI0218 00:34:57.146212 6231 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0218 00:34:57.146249 6231 factory.go:656] Stopping watch factory\\\\nI0218 00:34:57.146268 6231 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0218 00:34:57.169438 6231 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI0218 00:34:57.169465 6231 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI0218 00:34:57.169532 6231 ovnkube.go:599] Stopped ovnkube\\\\nI0218 00:34:57.169558 6231 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0218 00:34:57.169637 6231 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-tr5hg_openshift-ovn-kubernetes(3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tr5hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:00Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.896069 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.896128 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.896222 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.896253 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.896272 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:00Z","lastTransitionTime":"2026-02-18T00:35:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:00 crc kubenswrapper[4791]: I0218 00:35:00.898641 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb393ddf-cece-42f2-8d94-c88a3d536802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9485cfe0fe97814fa9ae31afb489a0e7eeea80a1ffb3501d473863a81ea8ed44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vnz85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:00Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.002244 4791 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.002283 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.002298 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.002313 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.002323 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:01Z","lastTransitionTime":"2026-02-18T00:35:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.036727 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 12:59:16.648975963 +0000 UTC Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.061210 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.061363 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.061409 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:35:01 crc kubenswrapper[4791]: E0218 00:35:01.061903 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:35:01 crc kubenswrapper[4791]: E0218 00:35:01.061903 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:35:01 crc kubenswrapper[4791]: E0218 00:35:01.062376 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.105920 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.105972 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.105983 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.106002 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.106015 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:01Z","lastTransitionTime":"2026-02-18T00:35:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.208386 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.208426 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.208437 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.208455 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.208466 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:01Z","lastTransitionTime":"2026-02-18T00:35:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.209443 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/afcf9ee1-4224-441c-a98d-9330bed34065-metrics-certs\") pod \"network-metrics-daemon-jq75l\" (UID: \"afcf9ee1-4224-441c-a98d-9330bed34065\") " pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:01 crc kubenswrapper[4791]: E0218 00:35:01.209588 4791 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Feb 18 00:35:01 crc kubenswrapper[4791]: E0218 00:35:01.209649 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/afcf9ee1-4224-441c-a98d-9330bed34065-metrics-certs podName:afcf9ee1-4224-441c-a98d-9330bed34065 nodeName:}" failed. No retries permitted until 2026-02-18 00:35:02.209633463 +0000 UTC m=+43.777646633 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/afcf9ee1-4224-441c-a98d-9330bed34065-metrics-certs") pod "network-metrics-daemon-jq75l" (UID: "afcf9ee1-4224-441c-a98d-9330bed34065") : object "openshift-multus"/"metrics-daemon-secret" not registered Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.311979 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.312330 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.312453 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.312523 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.312591 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:01Z","lastTransitionTime":"2026-02-18T00:35:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.415919 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.416209 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.416302 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.416388 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.416505 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:01Z","lastTransitionTime":"2026-02-18T00:35:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.518439 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.518478 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.518487 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.518500 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.518507 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:01Z","lastTransitionTime":"2026-02-18T00:35:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.621403 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.621777 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.621984 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.622148 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.622377 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:01Z","lastTransitionTime":"2026-02-18T00:35:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.725809 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.725850 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.725861 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.725878 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.725890 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:01Z","lastTransitionTime":"2026-02-18T00:35:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.828615 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.828691 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.828703 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.828722 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.828736 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:01Z","lastTransitionTime":"2026-02-18T00:35:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.931374 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.931458 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.931485 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.931513 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:01 crc kubenswrapper[4791]: I0218 00:35:01.931531 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:01Z","lastTransitionTime":"2026-02-18T00:35:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.033926 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.033966 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.033977 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.033992 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.034003 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:02Z","lastTransitionTime":"2026-02-18T00:35:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.037275 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-26 23:13:28.483399788 +0000 UTC Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.060539 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:02 crc kubenswrapper[4791]: E0218 00:35:02.060645 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.137712 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.137764 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.137781 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.137805 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.137824 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:02Z","lastTransitionTime":"2026-02-18T00:35:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.222566 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/afcf9ee1-4224-441c-a98d-9330bed34065-metrics-certs\") pod \"network-metrics-daemon-jq75l\" (UID: \"afcf9ee1-4224-441c-a98d-9330bed34065\") " pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:02 crc kubenswrapper[4791]: E0218 00:35:02.222813 4791 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Feb 18 00:35:02 crc kubenswrapper[4791]: E0218 00:35:02.222949 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/afcf9ee1-4224-441c-a98d-9330bed34065-metrics-certs podName:afcf9ee1-4224-441c-a98d-9330bed34065 nodeName:}" failed. No retries permitted until 2026-02-18 00:35:04.222923373 +0000 UTC m=+45.790936583 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/afcf9ee1-4224-441c-a98d-9330bed34065-metrics-certs") pod "network-metrics-daemon-jq75l" (UID: "afcf9ee1-4224-441c-a98d-9330bed34065") : object "openshift-multus"/"metrics-daemon-secret" not registered Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.241359 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.241422 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.241441 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.241465 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.241486 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:02Z","lastTransitionTime":"2026-02-18T00:35:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.344637 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.344684 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.344703 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.344726 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.344743 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:02Z","lastTransitionTime":"2026-02-18T00:35:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.446737 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.446775 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.446783 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.446803 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.446812 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:02Z","lastTransitionTime":"2026-02-18T00:35:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.548987 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.549080 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.549099 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.549123 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.549140 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:02Z","lastTransitionTime":"2026-02-18T00:35:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.651799 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.651871 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.651880 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.651893 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.651902 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:02Z","lastTransitionTime":"2026-02-18T00:35:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.755314 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.755370 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.755387 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.755410 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.755426 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:02Z","lastTransitionTime":"2026-02-18T00:35:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.857624 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.857661 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.857669 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.857684 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.857694 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:02Z","lastTransitionTime":"2026-02-18T00:35:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.961897 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.961965 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.961984 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.962008 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:02 crc kubenswrapper[4791]: I0218 00:35:02.962028 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:02Z","lastTransitionTime":"2026-02-18T00:35:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.037749 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-08 07:29:09.06857388 +0000 UTC Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.060396 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:35:03 crc kubenswrapper[4791]: E0218 00:35:03.060585 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.061047 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:35:03 crc kubenswrapper[4791]: E0218 00:35:03.061147 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.061289 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:35:03 crc kubenswrapper[4791]: E0218 00:35:03.061422 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.067401 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.067538 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.067608 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.067642 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.067718 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:03Z","lastTransitionTime":"2026-02-18T00:35:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.170639 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.170673 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.170680 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.170692 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.170702 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:03Z","lastTransitionTime":"2026-02-18T00:35:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.273898 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.273943 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.273952 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.273966 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.273976 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:03Z","lastTransitionTime":"2026-02-18T00:35:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.376228 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.376273 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.376288 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.376344 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.376362 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:03Z","lastTransitionTime":"2026-02-18T00:35:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.479075 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.479120 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.479135 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.479176 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.479191 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:03Z","lastTransitionTime":"2026-02-18T00:35:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.581885 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.581911 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.581921 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.581937 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.581952 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:03Z","lastTransitionTime":"2026-02-18T00:35:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.684754 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.684783 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.684811 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.684843 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.684854 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:03Z","lastTransitionTime":"2026-02-18T00:35:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.787790 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.787857 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.787875 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.787900 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.787918 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:03Z","lastTransitionTime":"2026-02-18T00:35:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.890323 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.890350 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.890361 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.890376 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.890387 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:03Z","lastTransitionTime":"2026-02-18T00:35:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.992538 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.992569 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.992579 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.992594 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:03 crc kubenswrapper[4791]: I0218 00:35:03.992604 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:03Z","lastTransitionTime":"2026-02-18T00:35:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.038702 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 09:59:47.782179745 +0000 UTC Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.060381 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:04 crc kubenswrapper[4791]: E0218 00:35:04.060601 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.095668 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.095735 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.095749 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.095796 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.095808 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:04Z","lastTransitionTime":"2026-02-18T00:35:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.198623 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.198659 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.198673 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.198692 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.198705 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:04Z","lastTransitionTime":"2026-02-18T00:35:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.243631 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/afcf9ee1-4224-441c-a98d-9330bed34065-metrics-certs\") pod \"network-metrics-daemon-jq75l\" (UID: \"afcf9ee1-4224-441c-a98d-9330bed34065\") " pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:04 crc kubenswrapper[4791]: E0218 00:35:04.243757 4791 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Feb 18 00:35:04 crc kubenswrapper[4791]: E0218 00:35:04.243812 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/afcf9ee1-4224-441c-a98d-9330bed34065-metrics-certs podName:afcf9ee1-4224-441c-a98d-9330bed34065 nodeName:}" failed. No retries permitted until 2026-02-18 00:35:08.243794429 +0000 UTC m=+49.811807609 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/afcf9ee1-4224-441c-a98d-9330bed34065-metrics-certs") pod "network-metrics-daemon-jq75l" (UID: "afcf9ee1-4224-441c-a98d-9330bed34065") : object "openshift-multus"/"metrics-daemon-secret" not registered Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.301549 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.301623 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.301665 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.301695 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.301714 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:04Z","lastTransitionTime":"2026-02-18T00:35:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.405005 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.405064 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.405084 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.405108 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.405121 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:04Z","lastTransitionTime":"2026-02-18T00:35:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.508718 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.508768 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.508816 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.508837 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.508852 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:04Z","lastTransitionTime":"2026-02-18T00:35:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.612475 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.612514 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.612524 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.612539 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.612550 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:04Z","lastTransitionTime":"2026-02-18T00:35:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.715114 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.715218 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.715243 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.715276 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.715300 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:04Z","lastTransitionTime":"2026-02-18T00:35:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.819120 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.819205 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.819223 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.819245 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.819262 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:04Z","lastTransitionTime":"2026-02-18T00:35:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.922330 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.922399 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.922418 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.922442 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:04 crc kubenswrapper[4791]: I0218 00:35:04.922463 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:04Z","lastTransitionTime":"2026-02-18T00:35:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.025152 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.025288 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.025313 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.025338 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.025355 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:05Z","lastTransitionTime":"2026-02-18T00:35:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.039537 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-15 06:09:23.434303393 +0000 UTC Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.060151 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.060215 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.060295 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:35:05 crc kubenswrapper[4791]: E0218 00:35:05.060493 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:35:05 crc kubenswrapper[4791]: E0218 00:35:05.060854 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:35:05 crc kubenswrapper[4791]: E0218 00:35:05.061005 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.129555 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.129619 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.129637 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.129668 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.129686 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:05Z","lastTransitionTime":"2026-02-18T00:35:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.233078 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.233193 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.233228 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.233260 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.233327 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:05Z","lastTransitionTime":"2026-02-18T00:35:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.336383 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.336451 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.336474 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.336507 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.336529 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:05Z","lastTransitionTime":"2026-02-18T00:35:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.439913 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.439957 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.439972 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.439996 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.440011 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:05Z","lastTransitionTime":"2026-02-18T00:35:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.543232 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.543290 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.543308 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.543332 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.543351 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:05Z","lastTransitionTime":"2026-02-18T00:35:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.644867 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.644957 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.644982 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.645016 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.645044 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:05Z","lastTransitionTime":"2026-02-18T00:35:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:05 crc kubenswrapper[4791]: E0218 00:35:05.663665 4791 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"49ba9a08-9d87-4573-b809-fff0547601af\\\",\\\"systemUUID\\\":\\\"e1be6815-4e1d-4d7f-9918-bb66fef09f86\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:05Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.668198 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.668260 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.668281 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.668309 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.668325 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:05Z","lastTransitionTime":"2026-02-18T00:35:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:05 crc kubenswrapper[4791]: E0218 00:35:05.684798 4791 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"49ba9a08-9d87-4573-b809-fff0547601af\\\",\\\"systemUUID\\\":\\\"e1be6815-4e1d-4d7f-9918-bb66fef09f86\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:05Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.689201 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.689243 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.689255 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.689273 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.689287 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:05Z","lastTransitionTime":"2026-02-18T00:35:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:05 crc kubenswrapper[4791]: E0218 00:35:05.702817 4791 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"49ba9a08-9d87-4573-b809-fff0547601af\\\",\\\"systemUUID\\\":\\\"e1be6815-4e1d-4d7f-9918-bb66fef09f86\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:05Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.707387 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.707530 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.707625 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.707662 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.707687 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:05Z","lastTransitionTime":"2026-02-18T00:35:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:05 crc kubenswrapper[4791]: E0218 00:35:05.727624 4791 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"49ba9a08-9d87-4573-b809-fff0547601af\\\",\\\"systemUUID\\\":\\\"e1be6815-4e1d-4d7f-9918-bb66fef09f86\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:05Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.732013 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.732061 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.732074 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.732092 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.732107 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:05Z","lastTransitionTime":"2026-02-18T00:35:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:05 crc kubenswrapper[4791]: E0218 00:35:05.747087 4791 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:05Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"49ba9a08-9d87-4573-b809-fff0547601af\\\",\\\"systemUUID\\\":\\\"e1be6815-4e1d-4d7f-9918-bb66fef09f86\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:05Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:05 crc kubenswrapper[4791]: E0218 00:35:05.747292 4791 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.749124 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.749174 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.749185 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.749199 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.749209 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:05Z","lastTransitionTime":"2026-02-18T00:35:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.852054 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.852104 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.852126 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.852187 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.852207 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:05Z","lastTransitionTime":"2026-02-18T00:35:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.955313 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.955400 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.955424 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.955452 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:05 crc kubenswrapper[4791]: I0218 00:35:05.955475 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:05Z","lastTransitionTime":"2026-02-18T00:35:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.040461 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-26 09:57:54.979256717 +0000 UTC
Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.057397 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.057637 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.057692 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.057727 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.057750 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:06Z","lastTransitionTime":"2026-02-18T00:35:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.061035 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l"
Feb 18 00:35:06 crc kubenswrapper[4791]: E0218 00:35:06.061281 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065"
Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.160003 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.160047 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.160060 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.160078 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.160091 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:06Z","lastTransitionTime":"2026-02-18T00:35:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.262630 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.262685 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.262694 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.262709 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.262719 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:06Z","lastTransitionTime":"2026-02-18T00:35:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.365932 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.366025 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.366034 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.366071 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.366142 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:06Z","lastTransitionTime":"2026-02-18T00:35:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.469735 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.469775 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.469784 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.469800 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.469810 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:06Z","lastTransitionTime":"2026-02-18T00:35:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.573521 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.573580 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.573593 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.573615 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.573630 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:06Z","lastTransitionTime":"2026-02-18T00:35:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.676545 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.676598 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.676611 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.676630 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.676643 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:06Z","lastTransitionTime":"2026-02-18T00:35:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.779501 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.779563 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.779577 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.779595 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.779616 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:06Z","lastTransitionTime":"2026-02-18T00:35:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.883190 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.883247 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.883264 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.883289 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.883309 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:06Z","lastTransitionTime":"2026-02-18T00:35:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.986690 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.986734 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.986745 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.986762 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:06 crc kubenswrapper[4791]: I0218 00:35:06.986774 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:06Z","lastTransitionTime":"2026-02-18T00:35:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.040600 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 11:24:53.919316716 +0000 UTC Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.061356 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.061356 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:35:07 crc kubenswrapper[4791]: E0218 00:35:07.061574 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.061651 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:35:07 crc kubenswrapper[4791]: E0218 00:35:07.061688 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:35:07 crc kubenswrapper[4791]: E0218 00:35:07.061894 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.090325 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.090917 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.091135 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.091480 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.091651 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:07Z","lastTransitionTime":"2026-02-18T00:35:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.194952 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.195034 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.195058 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.195098 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.195126 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:07Z","lastTransitionTime":"2026-02-18T00:35:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.300240 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.300329 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.300355 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.300384 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.300402 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:07Z","lastTransitionTime":"2026-02-18T00:35:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.404092 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.404231 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.404252 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.404279 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.404306 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:07Z","lastTransitionTime":"2026-02-18T00:35:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.507913 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.507987 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.508006 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.508036 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.508055 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:07Z","lastTransitionTime":"2026-02-18T00:35:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.611465 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.611537 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.611556 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.611584 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.611609 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:07Z","lastTransitionTime":"2026-02-18T00:35:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.714243 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.714316 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.714334 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.714363 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.714385 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:07Z","lastTransitionTime":"2026-02-18T00:35:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.818071 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.818111 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.818123 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.818139 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.818150 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:07Z","lastTransitionTime":"2026-02-18T00:35:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.921733 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.921804 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.921822 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.921852 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:07 crc kubenswrapper[4791]: I0218 00:35:07.921872 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:07Z","lastTransitionTime":"2026-02-18T00:35:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.025617 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.025699 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.025725 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.025763 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.025789 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:08Z","lastTransitionTime":"2026-02-18T00:35:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.041216 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-01 05:27:25.740820299 +0000 UTC Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.060749 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:08 crc kubenswrapper[4791]: E0218 00:35:08.060973 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.128846 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.128916 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.128940 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.128971 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.128993 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:08Z","lastTransitionTime":"2026-02-18T00:35:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.232951 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.233024 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.233044 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.233075 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.233106 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:08Z","lastTransitionTime":"2026-02-18T00:35:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.291021 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/afcf9ee1-4224-441c-a98d-9330bed34065-metrics-certs\") pod \"network-metrics-daemon-jq75l\" (UID: \"afcf9ee1-4224-441c-a98d-9330bed34065\") " pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:08 crc kubenswrapper[4791]: E0218 00:35:08.291336 4791 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Feb 18 00:35:08 crc kubenswrapper[4791]: E0218 00:35:08.291492 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/afcf9ee1-4224-441c-a98d-9330bed34065-metrics-certs podName:afcf9ee1-4224-441c-a98d-9330bed34065 nodeName:}" failed. No retries permitted until 2026-02-18 00:35:16.291458904 +0000 UTC m=+57.859472114 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/afcf9ee1-4224-441c-a98d-9330bed34065-metrics-certs") pod "network-metrics-daemon-jq75l" (UID: "afcf9ee1-4224-441c-a98d-9330bed34065") : object "openshift-multus"/"metrics-daemon-secret" not registered Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.336194 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.336252 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.336272 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.336302 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.336324 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:08Z","lastTransitionTime":"2026-02-18T00:35:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.439647 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.439710 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.439735 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.439764 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.439786 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:08Z","lastTransitionTime":"2026-02-18T00:35:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.475693 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.492902 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.509124 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPat
h\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973add4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{
\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:08Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.527810 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:08Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.544394 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.544500 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.544517 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.544546 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.544564 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:08Z","lastTransitionTime":"2026-02-18T00:35:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.545349 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:08Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.566210 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:08Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.586301 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-d2kpn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bdb769-59eb-4472-ba08-be5897ee2cd6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hg6c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-d2kpn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:08Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.600624 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-cg5l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aa04878bf8d787366f7a951ae7bb9daf6c4bdc56d79b0e487d137fda064c277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpnzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-cg5l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:08Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.620940 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cq6jj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a5f72cfc5bc6d0d3ce9a9b929eb8f81b9044509da4761c2b0c0cb0c78aa812c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4mlx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:47Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cq6jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:08Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.639720 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"02f6ba8d-1be3-44e1-b1bf-21c188e59802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa464b32325197259edb9689c1d43c14a3f3144080177d3d33eb5ea4b79a3462\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eee7f98bfe1c0ed68a17d92c77a0a84f087691e6b789f13cb0c8f14de244f009\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7zq7r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:08Z is after 2025-08-24T17:21:41Z" Feb 18 
00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.648393 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.648445 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.648465 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.648490 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.648509 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:08Z","lastTransitionTime":"2026-02-18T00:35:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.663378 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:08Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.683682 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:08Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.705807 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to 
patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-di
r\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5c41479d5089b48a439d4ea96fb7d33c0512e1c8a5d0c2c845eeda0eec6e789\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:08Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.727043 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:08Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.746423 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:08Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.751113 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.751237 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.751263 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.751296 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.751319 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:08Z","lastTransitionTime":"2026-02-18T00:35:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.764958 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b31a333-8f95-459c-8135-e91e557c4c85\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ebd0f5d677f875f5c5fe2b00c19a8a133c0524373b75be57ae4d9e4159183bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bhfmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:08Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.783069 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jq75l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afcf9ee1-4224-441c-a98d-9330bed34065\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55h6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55h6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:35:00Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jq75l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:08Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.808631 4791 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb393ddf-cece-42f2-8d94-c88a3d536802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9485cfe0fe97814fa9ae31afb489a0e7eeea80a1ffb3501d473863a81ea8ed44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed616
3a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\
":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vnz85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:08Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.857696 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.857766 4791 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.857785 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.857814 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.857832 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:08Z","lastTransitionTime":"2026-02-18T00:35:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.865180 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cedeb29806a5a6a1c21181d34fc0ec414201c0fd
58f89d94156ff0546de09206\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cedeb29806a5a6a1c21181d34fc0ec414201c0fd58f89d94156ff0546de09206\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-18T00:34:57Z\\\",\\\"message\\\":\\\"ry.go:140\\\\nI0218 00:34:57.145594 6231 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0218 00:34:57.145711 6231 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI0218 00:34:57.145882 6231 reflector.go:311] Stopping reflector *v1.UserDefinedNetwork (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140\\\\nI0218 00:34:57.146212 6231 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0218 00:34:57.146249 6231 factory.go:656] Stopping watch factory\\\\nI0218 00:34:57.146268 6231 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0218 00:34:57.169438 6231 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI0218 00:34:57.169465 6231 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI0218 00:34:57.169532 6231 ovnkube.go:599] Stopped ovnkube\\\\nI0218 00:34:57.169558 6231 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0218 00:34:57.169637 6231 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-tr5hg_openshift-ovn-kubernetes(3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tr5hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:08Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.960790 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.960878 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.960891 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.960908 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:08 crc kubenswrapper[4791]: I0218 00:35:08.960932 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:08Z","lastTransitionTime":"2026-02-18T00:35:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.041861 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-02 04:01:20.624591437 +0000 UTC Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.060436 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.060546 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:35:09 crc kubenswrapper[4791]: E0218 00:35:09.060653 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:35:09 crc kubenswrapper[4791]: E0218 00:35:09.060750 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.060456 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:35:09 crc kubenswrapper[4791]: E0218 00:35:09.060888 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.063299 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.063335 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.063346 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.063364 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.063382 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:09Z","lastTransitionTime":"2026-02-18T00:35:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.086821 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:09Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.109147 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:09Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.129209 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:09Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.150759 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-d2kpn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bdb769-59eb-4472-ba08-be5897ee2cd6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\"
,\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hg6c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-d2kpn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:09Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.165410 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.165465 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.165482 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.165512 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.165530 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:09Z","lastTransitionTime":"2026-02-18T00:35:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.168694 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-cg5l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aa04878bf8d787366f7a951ae7bb9daf6c4bdc56d79b0e487d137fda064c277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpnzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-cg5l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:09Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.182432 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cq6jj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a5f72cfc5bc6d0d3ce9a9b929eb8f81b9044509da4761c2b0c0cb0c78aa812c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4mlx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:47Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cq6jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:09Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.197646 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"02f6ba8d-1be3-44e1-b1bf-21c188e59802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa464b32325197259edb9689c1d43c14a3f3144080177d3d33eb5ea4b79a3462\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eee7f98bfe1c0ed68a17d92c77a0a84f087691e6b789f13cb0c8f14de244f009\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7zq7r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:09Z is after 2025-08-24T17:21:41Z" Feb 18 
00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.219503 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"lo
g-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973add4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reas
on\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:09Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.233982 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:09Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.249149 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:09Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.269300 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.269342 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.269356 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.269376 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.269391 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:09Z","lastTransitionTime":"2026-02-18T00:35:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.270492 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5c41479d5089b48a439d4ea96fb7d33c0512e1c8a5d0c2c845eeda0eec6e789\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:09Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.290484 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:09Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.311789 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8295587f-c41f-4f35-bfad-bc54d58d047c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7227e5ab7b9fa0410752947b534340d7033f38af5ea7179f07078ec9e561ac2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://22a4a7ad6c6d5a8547c420da7822e0efd3617c4c3d44fb6aca96e019564d0e3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b98d760307fb74389d3eb9487cb79e6b24b2e8a4483c5c7d1f85073375bfce93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5fe785fada613c75ca071e74b230919a697ca990f310f4c07afbfe8fb2e6d4d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fe785fada613c75ca071e74b230919a697ca990f310f4c07afbfe8fb2e6d4d9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:09Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.332814 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready 
status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:09Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.352769 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b31a333-8f95-459c-8135-e91e557c4c85\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ebd0f5d677f875f5c5fe2b00c19a8a133c0524373b75be57ae4d9e4159183bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bhfmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:09Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.367514 4791 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-jq75l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afcf9ee1-4224-441c-a98d-9330bed34065\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55h6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55h6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:35:00Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jq75l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:09Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.372516 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.372624 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 
00:35:09.372691 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.372728 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.372766 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:09Z","lastTransitionTime":"2026-02-18T00:35:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.396438 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cedeb29806a5a6a1c21181d34fc0ec414201c0fd
58f89d94156ff0546de09206\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cedeb29806a5a6a1c21181d34fc0ec414201c0fd58f89d94156ff0546de09206\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-18T00:34:57Z\\\",\\\"message\\\":\\\"ry.go:140\\\\nI0218 00:34:57.145594 6231 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0218 00:34:57.145711 6231 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI0218 00:34:57.145882 6231 reflector.go:311] Stopping reflector *v1.UserDefinedNetwork (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140\\\\nI0218 00:34:57.146212 6231 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0218 00:34:57.146249 6231 factory.go:656] Stopping watch factory\\\\nI0218 00:34:57.146268 6231 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0218 00:34:57.169438 6231 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI0218 00:34:57.169465 6231 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI0218 00:34:57.169532 6231 ovnkube.go:599] Stopped ovnkube\\\\nI0218 00:34:57.169558 6231 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0218 00:34:57.169637 6231 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-tr5hg_openshift-ovn-kubernetes(3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tr5hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:09Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.420362 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb393ddf-cece-42f2-8d94-c88a3d536802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9485cfe0fe97814fa9ae31afb489a0e7eeea80a1ffb3501d473863a81ea8ed44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"
}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt
\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"nam
e\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vnz85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:09Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.476422 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.476510 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.476535 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.476566 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.476586 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:09Z","lastTransitionTime":"2026-02-18T00:35:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.579765 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.579810 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.579821 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.579837 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.579848 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:09Z","lastTransitionTime":"2026-02-18T00:35:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.683357 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.684280 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.684502 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.684659 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.684799 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:09Z","lastTransitionTime":"2026-02-18T00:35:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.788896 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.788972 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.788997 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.789027 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.789050 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:09Z","lastTransitionTime":"2026-02-18T00:35:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.893030 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.893096 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.893114 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.893138 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.893191 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:09Z","lastTransitionTime":"2026-02-18T00:35:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.997102 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.997215 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.997244 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.997276 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:09 crc kubenswrapper[4791]: I0218 00:35:09.997298 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:09Z","lastTransitionTime":"2026-02-18T00:35:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.043072 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-14 15:51:59.173916672 +0000 UTC Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.060526 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:10 crc kubenswrapper[4791]: E0218 00:35:10.060722 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.100571 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.100650 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.100687 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.100722 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.100746 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:10Z","lastTransitionTime":"2026-02-18T00:35:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.203982 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.204035 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.204054 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.204077 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.204095 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:10Z","lastTransitionTime":"2026-02-18T00:35:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.307717 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.307779 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.307800 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.307824 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.307841 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:10Z","lastTransitionTime":"2026-02-18T00:35:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.410528 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.410571 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.410589 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.410612 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.410625 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:10Z","lastTransitionTime":"2026-02-18T00:35:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.513102 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.513138 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.513150 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.513204 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.513216 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:10Z","lastTransitionTime":"2026-02-18T00:35:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.617458 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.617499 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.617509 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.617527 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.617538 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:10Z","lastTransitionTime":"2026-02-18T00:35:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.720249 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.720299 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.720311 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.720328 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.720340 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:10Z","lastTransitionTime":"2026-02-18T00:35:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.823721 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.823759 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.823772 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.823795 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.823805 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:10Z","lastTransitionTime":"2026-02-18T00:35:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.919387 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.919514 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.919546 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.919577 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.919610 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:35:10 crc kubenswrapper[4791]: E0218 00:35:10.919728 4791 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 18 00:35:10 crc kubenswrapper[4791]: E0218 00:35:10.919745 4791 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 18 00:35:10 crc kubenswrapper[4791]: E0218 00:35:10.919757 4791 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 18 00:35:10 crc kubenswrapper[4791]: E0218 00:35:10.919814 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-02-18 00:35:42.919799021 +0000 UTC m=+84.487812201 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 18 00:35:10 crc kubenswrapper[4791]: E0218 00:35:10.919985 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:35:42.919965626 +0000 UTC m=+84.487978806 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:35:10 crc kubenswrapper[4791]: E0218 00:35:10.920013 4791 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Feb 18 00:35:10 crc kubenswrapper[4791]: E0218 00:35:10.920071 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-18 00:35:42.920054579 +0000 UTC m=+84.488067769 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Feb 18 00:35:10 crc kubenswrapper[4791]: E0218 00:35:10.920072 4791 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 18 00:35:10 crc kubenswrapper[4791]: E0218 00:35:10.920124 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-18 00:35:42.920110851 +0000 UTC m=+84.488124031 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 18 00:35:10 crc kubenswrapper[4791]: E0218 00:35:10.920322 4791 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 18 00:35:10 crc kubenswrapper[4791]: E0218 00:35:10.920393 4791 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 18 00:35:10 crc kubenswrapper[4791]: E0218 00:35:10.920413 4791 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 18 00:35:10 crc kubenswrapper[4791]: E0218 00:35:10.920517 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-02-18 00:35:42.920488321 +0000 UTC m=+84.488501501 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.926274 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.926314 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.926338 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.926363 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:10 crc kubenswrapper[4791]: I0218 00:35:10.926380 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:10Z","lastTransitionTime":"2026-02-18T00:35:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.029918 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.029959 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.029972 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.029992 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.030010 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:11Z","lastTransitionTime":"2026-02-18T00:35:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.044104 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-25 07:20:58.084455969 +0000 UTC Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.060741 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:35:11 crc kubenswrapper[4791]: E0218 00:35:11.060897 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.060741 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.060741 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:35:11 crc kubenswrapper[4791]: E0218 00:35:11.061090 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:35:11 crc kubenswrapper[4791]: E0218 00:35:11.061309 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.134131 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.134248 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.134270 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.134306 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.134331 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:11Z","lastTransitionTime":"2026-02-18T00:35:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.237393 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.237467 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.237489 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.237515 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.237544 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:11Z","lastTransitionTime":"2026-02-18T00:35:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.340898 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.340945 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.340965 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.340989 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.341005 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:11Z","lastTransitionTime":"2026-02-18T00:35:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.444579 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.444650 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.444671 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.444758 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.444777 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:11Z","lastTransitionTime":"2026-02-18T00:35:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.547954 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.548028 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.548053 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.548089 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.548112 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:11Z","lastTransitionTime":"2026-02-18T00:35:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.651918 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.652009 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.652036 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.652070 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.652092 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:11Z","lastTransitionTime":"2026-02-18T00:35:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.756268 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.756371 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.756399 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.756435 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.756460 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:11Z","lastTransitionTime":"2026-02-18T00:35:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.864236 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.865827 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.865872 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.865884 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.865902 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.865914 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:11Z","lastTransitionTime":"2026-02-18T00:35:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.886572 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126b
d791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:11Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.904630 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8295587f-c41f-4f35-bfad-bc54d58d047c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7227e5ab7b9fa0410752947b534340d7033f38af5ea7179f07078ec9e561ac2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://22a4a7ad6c6d5a8547c420da7822e0efd3617c4c3d44fb6aca96e019564d0e3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b98d760307fb74389d3eb9487cb79e6b24b2e8a4483c5c7d1f85073375bfce93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5fe785fada613c75ca071e74b230919a697ca990f310f4c07afbfe8fb2e6d4d9\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fe785fada613c75ca071e74b230919a697ca990f310f4c07afbfe8fb2e6d4d9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:11Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.924850 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:11Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.939811 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b31a333-8f95-459c-8135-e91e557c4c85\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ebd0f5d677f875f5c5fe2b00c19a8a133c0524373b75be57ae4d9e4159183bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bhfmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:11Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.952882 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jq75l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afcf9ee1-4224-441c-a98d-9330bed34065\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55h6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55h6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:35:00Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jq75l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:11Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.968612 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.968697 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.968714 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.968736 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.968752 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:11Z","lastTransitionTime":"2026-02-18T00:35:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.973505 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5c41479d5089b48a439d4ea96fb7d33c0512e1c8a5d0c2c845eeda0eec6e789\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:11Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:11 crc kubenswrapper[4791]: I0218 00:35:11.990506 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb393ddf-cece-42f2-8d94-c88a3d536802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9485cfe0fe97814fa9ae31afb489a0e7eeea80a1ffb3501d473863a81ea8ed44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vnz85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:11Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.017143 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cedeb29806a5a6a1c21181d34fc0ec414201c0fd58f89d94156ff0546de09206\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cedeb29806a5a6a1c21181d34fc0ec414201c0fd58f89d94156ff0546de09206\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-18T00:34:57Z\\\",\\\"message\\\":\\\"ry.go:140\\\\nI0218 00:34:57.145594 6231 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0218 00:34:57.145711 6231 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI0218 00:34:57.145882 6231 reflector.go:311] Stopping reflector *v1.UserDefinedNetwork (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140\\\\nI0218 00:34:57.146212 6231 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0218 00:34:57.146249 6231 factory.go:656] Stopping watch factory\\\\nI0218 00:34:57.146268 6231 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0218 00:34:57.169438 6231 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI0218 00:34:57.169465 6231 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI0218 00:34:57.169532 6231 ovnkube.go:599] Stopped ovnkube\\\\nI0218 00:34:57.169558 6231 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0218 00:34:57.169637 6231 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-tr5hg_openshift-ovn-kubernetes(3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tr5hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.030579 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.043489 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.044516 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 18:02:25.041478748 +0000 UTC Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.055548 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-d2kpn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bdb769-59eb-4472-ba08-be5897ee2cd6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/
lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hg6c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-d2kpn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.061375 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:12 crc kubenswrapper[4791]: E0218 00:35:12.061752 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.062107 4791 scope.go:117] "RemoveContainer" containerID="cedeb29806a5a6a1c21181d34fc0ec414201c0fd58f89d94156ff0546de09206" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.064578 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-cg5l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aa04878bf8d787366f7a951ae7bb9daf6c4bdc56d79b0e487d137fda064c277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpnzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-cg5l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.070751 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.070784 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.070795 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.070810 4791 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.070821 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:12Z","lastTransitionTime":"2026-02-18T00:35:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.077838 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cq6jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a5f72cfc5bc6d0d3ce9a9b929eb8f81b9044509da4761c2b0c0cb0c78aa812c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4mlx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:47Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cq6jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.092118 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"02f6ba8d-1be3-44e1-b1bf-21c188e59802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa464b32325197259edb9689c1d43c14a3f3144080177d3d33eb5ea4b79a3462\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eee7f98bfe1c0ed68a17d92c77a0a84f087691e6b789f13cb0c8f14de244f009\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7zq7r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z" Feb 18 
00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.118286 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"lo
g-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973add4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reas
on\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.137665 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.151754 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.169432 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.173276 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.173308 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.173319 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.173336 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.173350 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:12Z","lastTransitionTime":"2026-02-18T00:35:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.275232 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.275299 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.275309 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.275348 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.275361 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:12Z","lastTransitionTime":"2026-02-18T00:35:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.377637 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.377691 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.377704 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.377722 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.377736 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:12Z","lastTransitionTime":"2026-02-18T00:35:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.395056 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tr5hg_3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1/ovnkube-controller/1.log" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.399806 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" event={"ID":"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1","Type":"ContainerStarted","Data":"be8e36e9f03120ba567db12352eb82cd8116814e73026fe1466f3bc562fa4f29"} Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.400289 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.419979 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.437830 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.449348 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8295587f-c41f-4f35-bfad-bc54d58d047c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7227e5ab7b9fa0410752947b534340d7033f38af5ea7179f07078ec9e561ac2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://22a4a7ad6c6d5a8547c420da7822e0efd3617c4c3d44fb6aca96e019564d0e3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b98d760307fb74389d3eb9487cb79e6b24b2e8a4483c5c7d1f85073375bfce93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5fe785fada613c75ca071e74b230919a697ca990f310f4c07
afbfe8fb2e6d4d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fe785fada613c75ca071e74b230919a697ca990f310f4c07afbfe8fb2e6d4d9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.467711 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.481089 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.481183 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.481197 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.481216 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.481229 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:12Z","lastTransitionTime":"2026-02-18T00:35:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.489052 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b31a333-8f95-459c-8135-e91e557c4c85\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ebd0f5d677f875f5c5fe2b00c19a8a133c0524373b75be57ae4d9e4159183bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bhfmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.504395 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jq75l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afcf9ee1-4224-441c-a98d-9330bed34065\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55h6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55h6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:35:00Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jq75l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.523087 4791 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5c41479d5089b48a439d4ea96fb7d33
c0512e1c8a5d0c2c845eeda0eec6e789\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.539439 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.554844 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb393ddf-cece-42f2-8d94-c88a3d536802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9485cfe0fe97814fa9ae31afb489a0e7eeea80a1ffb3501d473863a81ea8ed44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab43
5ff555b889df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-b
inary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"termin
ated\\\":{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vnz85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.575278 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://be8e36e9f03120ba567db12352eb82cd8116814e
73026fe1466f3bc562fa4f29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cedeb29806a5a6a1c21181d34fc0ec414201c0fd58f89d94156ff0546de09206\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-18T00:34:57Z\\\",\\\"message\\\":\\\"ry.go:140\\\\nI0218 00:34:57.145594 6231 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0218 00:34:57.145711 6231 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI0218 00:34:57.145882 6231 reflector.go:311] Stopping reflector *v1.UserDefinedNetwork (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140\\\\nI0218 00:34:57.146212 6231 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0218 00:34:57.146249 6231 factory.go:656] Stopping watch factory\\\\nI0218 00:34:57.146268 6231 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0218 00:34:57.169438 6231 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI0218 00:34:57.169465 6231 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI0218 00:34:57.169532 6231 ovnkube.go:599] Stopped ovnkube\\\\nI0218 00:34:57.169558 6231 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0218 00:34:57.169637 6231 ovnkube.go:137] failed to run 
ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:35:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tr5hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.592954 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.592997 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.593007 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.593023 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.593032 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:12Z","lastTransitionTime":"2026-02-18T00:35:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.595856 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.623226 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-d2kpn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bdb769-59eb-4472-ba08-be5897ee2cd6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hg6c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-d2kpn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.635475 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-cg5l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aa04878bf8d787366f7a951ae7bb9daf6c4bdc56d79b0e487d137fda064c277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpnzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-cg5l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.644910 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cq6jj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a5f72cfc5bc6d0d3ce9a9b929eb8f81b9044509da4761c2b0c0cb0c78aa812c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4mlx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:47Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cq6jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.656615 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"02f6ba8d-1be3-44e1-b1bf-21c188e59802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa464b32325197259edb9689c1d43c14a3f3144080177d3d33eb5ea4b79a3462\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eee7f98bfe1c0ed68a17d92c77a0a84f087691e6b789f13cb0c8f14de244f009\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7zq7r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z" Feb 18 
00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.676606 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"lo
g-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973add4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reas
on\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.688185 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.695789 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.695824 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.695839 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.695856 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.695869 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:12Z","lastTransitionTime":"2026-02-18T00:35:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.700464 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.798278 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.798314 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.798326 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.798343 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.798355 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:12Z","lastTransitionTime":"2026-02-18T00:35:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.901144 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.901201 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.901209 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.901228 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:12 crc kubenswrapper[4791]: I0218 00:35:12.901237 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:12Z","lastTransitionTime":"2026-02-18T00:35:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.096145 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:35:13 crc kubenswrapper[4791]: E0218 00:35:13.096266 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.096520 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.096380 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-23 19:01:45.06363372 +0000 UTC Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.096520 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:35:13 crc kubenswrapper[4791]: E0218 00:35:13.096645 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:35:13 crc kubenswrapper[4791]: E0218 00:35:13.096707 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.098051 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.098076 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.098089 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.098103 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.098114 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:13Z","lastTransitionTime":"2026-02-18T00:35:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.200128 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.200189 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.200200 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.200213 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.200223 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:13Z","lastTransitionTime":"2026-02-18T00:35:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.302343 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.302386 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.302395 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.302409 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.302419 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:13Z","lastTransitionTime":"2026-02-18T00:35:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.403856 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.403885 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.403894 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.403906 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.403915 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:13Z","lastTransitionTime":"2026-02-18T00:35:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.405250 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tr5hg_3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1/ovnkube-controller/2.log" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.405876 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tr5hg_3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1/ovnkube-controller/1.log" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.408248 4791 generic.go:334] "Generic (PLEG): container finished" podID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerID="be8e36e9f03120ba567db12352eb82cd8116814e73026fe1466f3bc562fa4f29" exitCode=1 Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.408275 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" event={"ID":"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1","Type":"ContainerDied","Data":"be8e36e9f03120ba567db12352eb82cd8116814e73026fe1466f3bc562fa4f29"} Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.408303 4791 scope.go:117] "RemoveContainer" containerID="cedeb29806a5a6a1c21181d34fc0ec414201c0fd58f89d94156ff0546de09206" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.408993 4791 scope.go:117] "RemoveContainer" containerID="be8e36e9f03120ba567db12352eb82cd8116814e73026fe1466f3bc562fa4f29" Feb 18 00:35:13 crc kubenswrapper[4791]: E0218 00:35:13.409145 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-tr5hg_openshift-ovn-kubernetes(3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1)\"" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.426142 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb393ddf-cece-42f2-8d94-c88a3d536802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9485cfe0fe97814fa9ae31afb489a0e7eeea80a1ffb3501d473863a81ea8ed44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vnz85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:13Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.445952 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://be8e36e9f03120ba567db12352eb82cd8116814e73026fe1466f3bc562fa4f29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cedeb29806a5a6a1c21181d34fc0ec414201c0fd58f89d94156ff0546de09206\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-18T00:34:57Z\\\",\\\"message\\\":\\\"ry.go:140\\\\nI0218 00:34:57.145594 6231 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0218 00:34:57.145711 6231 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI0218 00:34:57.145882 6231 reflector.go:311] Stopping reflector *v1.UserDefinedNetwork (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140\\\\nI0218 00:34:57.146212 6231 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0218 00:34:57.146249 6231 factory.go:656] Stopping watch factory\\\\nI0218 00:34:57.146268 6231 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0218 00:34:57.169438 6231 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI0218 00:34:57.169465 6231 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI0218 00:34:57.169532 6231 ovnkube.go:599] Stopped ovnkube\\\\nI0218 00:34:57.169558 6231 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0218 00:34:57.169637 6231 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://be8e36e9f03120ba567db12352eb82cd8116814e73026fe1466f3bc562fa4f29\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-18T00:35:13Z\\\",\\\"message\\\":\\\"] Retry successful for *v1.Pod openshift-dns/node-resolver-cg5l2 after 0 failed attempt(s)\\\\nI0218 00:35:12.890042 6469 default_network_controller.go:776] Recording 
success event on pod openshift-dns/node-resolver-cg5l2\\\\nF0218 00:35:12.889620 6469 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z]\\\\nI0218 00:35:12.890050 6469 base_network_controller_pods.go:916] Annotation values: ip=[10.217.0.3/23] ; mac=0a:58:0a:d9:00:03 ; gw=[10.217.0.1]\\\\nI0218 00:35:12.889327 6469 services_controller.go:443] Built service openshift-authentication-operator/metrics LB cluster-wide configs for networ\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:35:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa4
1ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tr5hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:13Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.466997 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973ad
d4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:13Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.480683 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:13Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.490429 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:13Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.502144 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:13Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.505486 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.505514 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.505523 4791 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.505535 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.505545 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:13Z","lastTransitionTime":"2026-02-18T00:35:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.513249 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-d2kpn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bdb769-59eb-4472-ba08-be5897ee2cd6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"
},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hg6c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-d2kpn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:13Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.523284 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-cg5l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aa04878bf8d787366f7a951ae7bb9daf6c4bdc56d79b0e487d137fda064c277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpnzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-cg5l2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:13Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.532841 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cq6jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a5f72cfc5bc6d0d3ce9a9b929eb8f81b9044509da4761c2b0c0cb0c78aa812c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4mlx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:47Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cq6jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:13Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.545308 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"02f6ba8d-1be3-44e1-b1bf-21c188e59802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa464b32325197259edb9689c1d43c14a3f3144080177d3d33eb5ea4b79a3462\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eee7f98bfe1c0ed68a17d92c77a0a84f087691e6b789f13cb0c8f14de244f009\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7zq7r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:13Z is after 2025-08-24T17:21:41Z" Feb 18 
00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.557418 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:13Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.567485 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:13Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.591994 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5c41479d5089b48a439d4ea96fb7d33c0512e1c8a5d0c2c845eeda0eec6e789\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:13Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.607954 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.607983 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.607992 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.608007 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.608015 4791 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:13Z","lastTransitionTime":"2026-02-18T00:35:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.607994 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:13Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.618474 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8295587f-c41f-4f35-bfad-bc54d58d047c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7227e5ab7b9fa0410752947b534340d7033f38af5ea7179f07078ec9e561ac2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://22a4a7ad6c6d5a8547c420da7822e0efd3617c4c3d44fb6aca96e019564d0e3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b98d760307fb74389d3eb9487cb79e6b24b2e8a4483c5c7d1f85073375bfce93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5fe785fada613c75ca071e74b230919a697ca990f310f4c07afbfe8fb2e6d4d9\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fe785fada613c75ca071e74b230919a697ca990f310f4c07afbfe8fb2e6d4d9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:13Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.630290 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:13Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.640471 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b31a333-8f95-459c-8135-e91e557c4c85\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ebd0f5d677f875f5c5fe2b00c19a8a133c0524373b75be57ae4d9e4159183bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bhfmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:13Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.651668 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jq75l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afcf9ee1-4224-441c-a98d-9330bed34065\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55h6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55h6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:35:00Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jq75l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:13Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.710267 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.710313 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.710321 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.710335 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.710344 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:13Z","lastTransitionTime":"2026-02-18T00:35:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.812754 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.812796 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.812812 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.812828 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.812837 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:13Z","lastTransitionTime":"2026-02-18T00:35:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.915573 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.915652 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.915670 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.915693 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:13 crc kubenswrapper[4791]: I0218 00:35:13.915711 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:13Z","lastTransitionTime":"2026-02-18T00:35:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.019740 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.019774 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.019785 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.019800 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.019810 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:14Z","lastTransitionTime":"2026-02-18T00:35:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.060404 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:14 crc kubenswrapper[4791]: E0218 00:35:14.060581 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.096658 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-08 17:23:47.324647028 +0000 UTC Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.122506 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.122541 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.122550 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.122565 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.122575 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:14Z","lastTransitionTime":"2026-02-18T00:35:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.225986 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.226057 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.226076 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.226100 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.226119 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:14Z","lastTransitionTime":"2026-02-18T00:35:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.329253 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.329286 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.329294 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.329308 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.329317 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:14Z","lastTransitionTime":"2026-02-18T00:35:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.413948 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tr5hg_3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1/ovnkube-controller/2.log" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.418131 4791 scope.go:117] "RemoveContainer" containerID="be8e36e9f03120ba567db12352eb82cd8116814e73026fe1466f3bc562fa4f29" Feb 18 00:35:14 crc kubenswrapper[4791]: E0218 00:35:14.418343 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-tr5hg_openshift-ovn-kubernetes(3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1)\"" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.428401 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jq75l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afcf9ee1-4224-441c-a98d-9330bed34065\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55h6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55h6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:35:00Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jq75l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:14Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.432235 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.432264 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.432273 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.432288 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.432298 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:14Z","lastTransitionTime":"2026-02-18T00:35:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.440443 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5c41479d5089b48a439d4ea96fb7d33c0512e1c8a5d0c2c845eeda0eec6e789\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:14Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.452011 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:14Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.465456 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8295587f-c41f-4f35-bfad-bc54d58d047c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7227e5ab7b9fa0410752947b534340d7033f38af5ea7179f07078ec9e561ac2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://22a4a7ad6c6d5a8547c420da7822e0efd3617c4c3d44fb6aca96e019564d0e3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b98d760307fb74389d3eb9487cb79e6b24b2e8a4483c5c7d1f85073375bfce93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5fe785fada613c75ca071e74b230919a697ca990f310f4c07afbfe8fb2e6d4d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fe785fada613c75ca071e74b230919a697ca990f310f4c07afbfe8fb2e6d4d9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:14Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.476148 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready 
status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:14Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.497098 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b31a333-8f95-459c-8135-e91e557c4c85\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ebd0f5d677f875f5c5fe2b00c19a8a133c0524373b75be57ae4d9e4159183bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bhfmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:14Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.518058 4791 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb393ddf-cece-42f2-8d94-c88a3d536802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9485cfe0fe97814fa9ae31afb489a0e7eeea80a1ffb3501d473863a81ea8ed44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vnz85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:14Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.535185 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.535221 4791 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.535232 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.535248 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.535258 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:14Z","lastTransitionTime":"2026-02-18T00:35:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.548745 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://be8e36e9f03120ba567db12352eb82cd8116814e
73026fe1466f3bc562fa4f29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://be8e36e9f03120ba567db12352eb82cd8116814e73026fe1466f3bc562fa4f29\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-18T00:35:13Z\\\",\\\"message\\\":\\\"] Retry successful for *v1.Pod openshift-dns/node-resolver-cg5l2 after 0 failed attempt(s)\\\\nI0218 00:35:12.890042 6469 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-cg5l2\\\\nF0218 00:35:12.889620 6469 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z]\\\\nI0218 00:35:12.890050 6469 base_network_controller_pods.go:916] Annotation values: ip=[10.217.0.3/23] ; mac=0a:58:0a:d9:00:03 ; gw=[10.217.0.1]\\\\nI0218 00:35:12.889327 6469 services_controller.go:443] Built service openshift-authentication-operator/metrics LB cluster-wide configs for networ\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:35:12Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-tr5hg_openshift-ovn-kubernetes(3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tr5hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:14Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.560534 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cq6jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a5f72cfc5bc6d0d3ce9a9b929eb8f81b9044509da4761c2b0c0cb0c78aa812c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4mlx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:47Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cq6jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:14Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.574965 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02f6ba8d-1be3-44e1-b1bf-21c188e59802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa464b32325197259edb9689c1d43c14a3f3144080177d3d33eb5ea4b79a3462\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eee7f98bfe1c0ed68a17d92c77a0a84f087691e6b789f13cb0c8f14de244f009\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\
\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7zq7r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:14Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.606629 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resource
s\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973add4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Complete
d\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:14Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.621915 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:14Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.637919 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.637950 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.637960 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.637975 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.637986 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:14Z","lastTransitionTime":"2026-02-18T00:35:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.640791 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:14Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.655410 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:14Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.670993 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-d2kpn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bdb769-59eb-4472-ba08-be5897ee2cd6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hg6c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-d2kpn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:14Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.687255 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-cg5l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aa04878bf8d787366f7a951ae7bb9daf6c4bdc56d79b0e487d137fda064c277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpnzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-cg5l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:14Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.700072 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:14Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.712435 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:14Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.740344 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.740401 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.740425 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.740461 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.740484 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:14Z","lastTransitionTime":"2026-02-18T00:35:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.843508 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.843593 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.843603 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.843617 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.843626 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:14Z","lastTransitionTime":"2026-02-18T00:35:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.946611 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.946651 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.946659 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.946673 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:14 crc kubenswrapper[4791]: I0218 00:35:14.946682 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:14Z","lastTransitionTime":"2026-02-18T00:35:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.049220 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.049254 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.049262 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.049274 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.049285 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:15Z","lastTransitionTime":"2026-02-18T00:35:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.060868 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.060925 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.060969 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:35:15 crc kubenswrapper[4791]: E0218 00:35:15.061110 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:35:15 crc kubenswrapper[4791]: E0218 00:35:15.062006 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:35:15 crc kubenswrapper[4791]: E0218 00:35:15.062445 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.097642 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-21 11:50:01.771474239 +0000 UTC Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.151960 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.151994 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.152003 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.152015 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.152023 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:15Z","lastTransitionTime":"2026-02-18T00:35:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.254067 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.254118 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.254128 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.254142 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.254172 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:15Z","lastTransitionTime":"2026-02-18T00:35:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.356146 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.356205 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.356224 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.356239 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.356250 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:15Z","lastTransitionTime":"2026-02-18T00:35:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.458306 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.458373 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.458391 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.458413 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.458432 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:15Z","lastTransitionTime":"2026-02-18T00:35:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.560791 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.560829 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.560839 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.560854 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.560864 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:15Z","lastTransitionTime":"2026-02-18T00:35:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.663625 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.663666 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.663684 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.663702 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.663712 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:15Z","lastTransitionTime":"2026-02-18T00:35:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.766440 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.766486 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.766497 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.766512 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.766521 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:15Z","lastTransitionTime":"2026-02-18T00:35:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.868357 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.868398 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.868411 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.868426 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.868434 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:15Z","lastTransitionTime":"2026-02-18T00:35:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.971301 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.971361 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.971382 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.971412 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:15 crc kubenswrapper[4791]: I0218 00:35:15.971436 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:15Z","lastTransitionTime":"2026-02-18T00:35:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.013618 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.013682 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.013707 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.013747 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.013769 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:16Z","lastTransitionTime":"2026-02-18T00:35:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:16 crc kubenswrapper[4791]: E0218 00:35:16.027309 4791 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"49ba9a08-9d87-4573-b809-fff0547601af\\\",\\\"systemUUID\\\":\\\"e1be6815-4e1d-4d7f-9918-bb66fef09f86\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:16Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.032196 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.032233 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.032245 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.032259 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.032268 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:16Z","lastTransitionTime":"2026-02-18T00:35:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:16 crc kubenswrapper[4791]: E0218 00:35:16.047839 4791 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"49ba9a08-9d87-4573-b809-fff0547601af\\\",\\\"systemUUID\\\":\\\"e1be6815-4e1d-4d7f-9918-bb66fef09f86\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:16Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.052037 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.052099 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.052125 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.052242 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.052287 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:16Z","lastTransitionTime":"2026-02-18T00:35:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.060819 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:16 crc kubenswrapper[4791]: E0218 00:35:16.060925 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:35:16 crc kubenswrapper[4791]: E0218 00:35:16.070329 4791 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"49ba9a08-9d87-4573-b809-fff0547601af\\\",\\\"systemUUID\\\":\\\"e1be6815-4e1d-4d7f-9918-bb66fef09f86\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:16Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.073194 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.073257 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.073274 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.073298 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.073315 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:16Z","lastTransitionTime":"2026-02-18T00:35:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:16 crc kubenswrapper[4791]: E0218 00:35:16.090657 4791 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"49ba9a08-9d87-4573-b809-fff0547601af\\\",\\\"systemUUID\\\":\\\"e1be6815-4e1d-4d7f-9918-bb66fef09f86\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:16Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.093766 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.093807 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.093817 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.093834 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.093846 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:16Z","lastTransitionTime":"2026-02-18T00:35:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.098576 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 14:51:10.082354122 +0000 UTC Feb 18 00:35:16 crc kubenswrapper[4791]: E0218 00:35:16.110747 4791 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"49ba9a08-9d87-4573-b809-fff0547601af\\\",\\\"systemUUID\\\":\\\"e1be6815-4e1d-4d7f-9918-bb66fef09f86\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:16Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:16 crc kubenswrapper[4791]: E0218 00:35:16.110855 4791 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.112447 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.112471 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.112482 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.112502 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.112515 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:16Z","lastTransitionTime":"2026-02-18T00:35:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.215720 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.215777 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.215789 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.215808 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.215818 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:16Z","lastTransitionTime":"2026-02-18T00:35:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.318073 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.318109 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.318117 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.318130 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.318140 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:16Z","lastTransitionTime":"2026-02-18T00:35:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.324802 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/afcf9ee1-4224-441c-a98d-9330bed34065-metrics-certs\") pod \"network-metrics-daemon-jq75l\" (UID: \"afcf9ee1-4224-441c-a98d-9330bed34065\") " pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:16 crc kubenswrapper[4791]: E0218 00:35:16.324955 4791 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Feb 18 00:35:16 crc kubenswrapper[4791]: E0218 00:35:16.325012 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/afcf9ee1-4224-441c-a98d-9330bed34065-metrics-certs podName:afcf9ee1-4224-441c-a98d-9330bed34065 nodeName:}" failed. No retries permitted until 2026-02-18 00:35:32.324996326 +0000 UTC m=+73.893009496 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/afcf9ee1-4224-441c-a98d-9330bed34065-metrics-certs") pod "network-metrics-daemon-jq75l" (UID: "afcf9ee1-4224-441c-a98d-9330bed34065") : object "openshift-multus"/"metrics-daemon-secret" not registered Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.419994 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.420042 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.420055 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.420074 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.420087 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:16Z","lastTransitionTime":"2026-02-18T00:35:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.521982 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.522019 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.522028 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.522040 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.522048 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:16Z","lastTransitionTime":"2026-02-18T00:35:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.623890 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.623926 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.623937 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.623952 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.623962 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:16Z","lastTransitionTime":"2026-02-18T00:35:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.726679 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.726722 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.726734 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.726752 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.726763 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:16Z","lastTransitionTime":"2026-02-18T00:35:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.829955 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.829989 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.829997 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.830011 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.830020 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:16Z","lastTransitionTime":"2026-02-18T00:35:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.931615 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.931655 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.931664 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.931678 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:16 crc kubenswrapper[4791]: I0218 00:35:16.931687 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:16Z","lastTransitionTime":"2026-02-18T00:35:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.033744 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.033779 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.033788 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.033800 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.033808 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:17Z","lastTransitionTime":"2026-02-18T00:35:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.060250 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.060318 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:35:17 crc kubenswrapper[4791]: E0218 00:35:17.060377 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:35:17 crc kubenswrapper[4791]: E0218 00:35:17.060466 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.060568 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:35:17 crc kubenswrapper[4791]: E0218 00:35:17.060650 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.099244 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-25 09:46:46.580928356 +0000 UTC Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.136342 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.136376 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.136385 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.136398 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.136407 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:17Z","lastTransitionTime":"2026-02-18T00:35:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.238577 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.238615 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.238627 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.238641 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.238652 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:17Z","lastTransitionTime":"2026-02-18T00:35:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.341205 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.341249 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.341259 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.341274 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.341285 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:17Z","lastTransitionTime":"2026-02-18T00:35:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.442857 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.443067 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.443128 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.443203 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.443293 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:17Z","lastTransitionTime":"2026-02-18T00:35:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.545259 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.545332 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.545353 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.545386 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.545406 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:17Z","lastTransitionTime":"2026-02-18T00:35:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.648459 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.648536 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.648557 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.648582 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.648605 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:17Z","lastTransitionTime":"2026-02-18T00:35:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.751087 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.751145 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.751192 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.751217 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.751234 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:17Z","lastTransitionTime":"2026-02-18T00:35:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.854847 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.855322 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.855562 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.855821 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.855965 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:17Z","lastTransitionTime":"2026-02-18T00:35:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.958882 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.958933 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.958950 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.958973 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:17 crc kubenswrapper[4791]: I0218 00:35:17.958991 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:17Z","lastTransitionTime":"2026-02-18T00:35:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.060836 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:18 crc kubenswrapper[4791]: E0218 00:35:18.060992 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.061332 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.061376 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.061392 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.061411 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.061428 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:18Z","lastTransitionTime":"2026-02-18T00:35:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.101107 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 06:45:24.825833188 +0000 UTC Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.163653 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.163729 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.163753 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.163785 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.163808 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:18Z","lastTransitionTime":"2026-02-18T00:35:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.266582 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.266628 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.266643 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.266663 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.266677 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:18Z","lastTransitionTime":"2026-02-18T00:35:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.368895 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.368926 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.368935 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.368949 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.368959 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:18Z","lastTransitionTime":"2026-02-18T00:35:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.471371 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.471421 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.471435 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.471455 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.471467 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:18Z","lastTransitionTime":"2026-02-18T00:35:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.573808 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.573855 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.573873 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.573890 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.573901 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:18Z","lastTransitionTime":"2026-02-18T00:35:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.675978 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.676014 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.676025 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.676041 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.676053 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:18Z","lastTransitionTime":"2026-02-18T00:35:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.778802 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.778862 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.778879 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.778901 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.778921 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:18Z","lastTransitionTime":"2026-02-18T00:35:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.881311 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.881818 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.881990 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.882125 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.882281 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:18Z","lastTransitionTime":"2026-02-18T00:35:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.984808 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.984843 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.984853 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.984868 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:18 crc kubenswrapper[4791]: I0218 00:35:18.984878 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:18Z","lastTransitionTime":"2026-02-18T00:35:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.060667 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.060704 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:35:19 crc kubenswrapper[4791]: E0218 00:35:19.060794 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.060832 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:35:19 crc kubenswrapper[4791]: E0218 00:35:19.060993 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:35:19 crc kubenswrapper[4791]: E0218 00:35:19.061048 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.074203 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8295587f-c41f-4f35-bfad-bc54d58d047c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7227e5ab7b9fa0410752947b534340d7033f38af5ea7179f07078ec9e561ac2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://22a4a7ad6c6d5a8547c420da7822e0efd3617c4c3d44fb6aca96e019564d0e3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMount
s\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b98d760307fb74389d3eb9487cb79e6b24b2e8a4483c5c7d1f85073375bfce93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5fe785fada613c75ca071e74b230919a697ca990f310f4c07afbfe8fb2e6d4d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fe785fada613c75ca071e74b230919a697ca990f310f4c07afbfe8fb2e6d4d9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:19Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.085339 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:19Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.087603 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.087735 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.087827 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.087928 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.088008 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:19Z","lastTransitionTime":"2026-02-18T00:35:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.097515 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b31a333-8f95-459c-8135-e91e557c4c85\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ebd0f5d677f875f5c5fe2b00c19a8a133c0524373b75be57ae4d9e4159183bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bhfmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:19Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.101882 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-20 17:27:28.583152843 +0000 UTC Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.108228 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jq75l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afcf9ee1-4224-441c-a98d-9330bed34065\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55h6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55h6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:35:00Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jq75l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": 
tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:19Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.127364 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\
\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5c41479d5089b48a439d4ea96fb7d33c0512e1c8a5d0c2c845eeda0eec6e789\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:19Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.144011 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:19Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.158444 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb393ddf-cece-42f2-8d94-c88a3d536802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9485cfe0fe97814fa9ae31afb489a0e7eeea80a1ffb3501d473863a81ea8ed44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab43
5ff555b889df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-b
inary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"termin
ated\\\":{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vnz85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:19Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.185531 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://be8e36e9f03120ba567db12352eb82cd8116814e
73026fe1466f3bc562fa4f29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://be8e36e9f03120ba567db12352eb82cd8116814e73026fe1466f3bc562fa4f29\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-18T00:35:13Z\\\",\\\"message\\\":\\\"] Retry successful for *v1.Pod openshift-dns/node-resolver-cg5l2 after 0 failed attempt(s)\\\\nI0218 00:35:12.890042 6469 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-cg5l2\\\\nF0218 00:35:12.889620 6469 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z]\\\\nI0218 00:35:12.890050 6469 base_network_controller_pods.go:916] Annotation values: ip=[10.217.0.3/23] ; mac=0a:58:0a:d9:00:03 ; gw=[10.217.0.1]\\\\nI0218 00:35:12.889327 6469 services_controller.go:443] Built service openshift-authentication-operator/metrics LB cluster-wide configs for networ\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:35:12Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-tr5hg_openshift-ovn-kubernetes(3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tr5hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:19Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.191116 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.191290 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.191396 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.191672 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.191756 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:19Z","lastTransitionTime":"2026-02-18T00:35:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.201147 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:19Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.217198 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-d2kpn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bdb769-59eb-4472-ba08-be5897ee2cd6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hg6c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-d2kpn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:19Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.230599 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-cg5l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aa04878bf8d787366f7a951ae7bb9daf6c4bdc56d79b0e487d137fda064c277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpnzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-cg5l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:19Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.241276 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cq6jj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a5f72cfc5bc6d0d3ce9a9b929eb8f81b9044509da4761c2b0c0cb0c78aa812c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4mlx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:47Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cq6jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:19Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.254905 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"02f6ba8d-1be3-44e1-b1bf-21c188e59802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa464b32325197259edb9689c1d43c14a3f3144080177d3d33eb5ea4b79a3462\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eee7f98bfe1c0ed68a17d92c77a0a84f087691e6b789f13cb0c8f14de244f009\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7zq7r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:19Z is after 2025-08-24T17:21:41Z" Feb 18 
00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.273080 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"lo
g-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973add4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reas
on\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:19Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.286140 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:19Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.294457 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.294562 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.294575 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.294594 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.294627 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:19Z","lastTransitionTime":"2026-02-18T00:35:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.297595 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:19Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.310815 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:19Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.320489 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:19Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.396828 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.396888 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.396900 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.396918 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.396930 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:19Z","lastTransitionTime":"2026-02-18T00:35:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.499236 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.499300 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.499319 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.499344 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.499360 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:19Z","lastTransitionTime":"2026-02-18T00:35:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.601936 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.601995 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.602012 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.602036 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.602053 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:19Z","lastTransitionTime":"2026-02-18T00:35:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.704420 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.704451 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.704476 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.704490 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.704499 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:19Z","lastTransitionTime":"2026-02-18T00:35:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.806987 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.807063 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.807087 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.807116 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.807138 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:19Z","lastTransitionTime":"2026-02-18T00:35:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.909320 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.909355 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.909374 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.909392 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:19 crc kubenswrapper[4791]: I0218 00:35:19.909404 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:19Z","lastTransitionTime":"2026-02-18T00:35:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.011519 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.011564 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.011576 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.011594 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.011609 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:20Z","lastTransitionTime":"2026-02-18T00:35:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.060220 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:20 crc kubenswrapper[4791]: E0218 00:35:20.060343 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.102433 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-12 16:09:44.474504818 +0000 UTC Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.113473 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.113508 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.113521 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.113538 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.113550 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:20Z","lastTransitionTime":"2026-02-18T00:35:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.216617 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.216682 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.216697 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.216721 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.216736 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:20Z","lastTransitionTime":"2026-02-18T00:35:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.319276 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.319315 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.319325 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.319344 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.319353 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:20Z","lastTransitionTime":"2026-02-18T00:35:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.422117 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.422220 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.422267 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.422289 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.422302 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:20Z","lastTransitionTime":"2026-02-18T00:35:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.525081 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.525129 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.525141 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.525181 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.525194 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:20Z","lastTransitionTime":"2026-02-18T00:35:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.628729 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.628796 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.628808 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.628827 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.628841 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:20Z","lastTransitionTime":"2026-02-18T00:35:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.731691 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.731762 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.731787 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.731811 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.731827 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:20Z","lastTransitionTime":"2026-02-18T00:35:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.835511 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.835571 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.835589 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.835613 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.835629 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:20Z","lastTransitionTime":"2026-02-18T00:35:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.938314 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.938395 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.938425 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.938452 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:20 crc kubenswrapper[4791]: I0218 00:35:20.938469 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:20Z","lastTransitionTime":"2026-02-18T00:35:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.041333 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.041382 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.041396 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.041417 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.041429 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:21Z","lastTransitionTime":"2026-02-18T00:35:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.060763 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.060768 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.060763 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:35:21 crc kubenswrapper[4791]: E0218 00:35:21.060885 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:35:21 crc kubenswrapper[4791]: E0218 00:35:21.060958 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:35:21 crc kubenswrapper[4791]: E0218 00:35:21.061026 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.103139 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-23 01:36:38.855404277 +0000 UTC Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.144659 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.144723 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.144749 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.144780 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.144802 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:21Z","lastTransitionTime":"2026-02-18T00:35:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.247257 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.247326 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.247348 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.247387 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.247410 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:21Z","lastTransitionTime":"2026-02-18T00:35:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.349783 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.349831 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.349844 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.349863 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.349878 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:21Z","lastTransitionTime":"2026-02-18T00:35:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.453083 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.453145 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.453188 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.453215 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.453232 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:21Z","lastTransitionTime":"2026-02-18T00:35:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.556238 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.556304 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.556321 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.556347 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.556365 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:21Z","lastTransitionTime":"2026-02-18T00:35:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.659922 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.659988 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.660007 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.660033 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.660049 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:21Z","lastTransitionTime":"2026-02-18T00:35:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.762186 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.762234 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.762258 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.762278 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.762292 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:21Z","lastTransitionTime":"2026-02-18T00:35:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.865269 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.865681 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.865808 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.865950 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.866059 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:21Z","lastTransitionTime":"2026-02-18T00:35:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.969235 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.969277 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.969287 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.969303 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:21 crc kubenswrapper[4791]: I0218 00:35:21.969313 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:21Z","lastTransitionTime":"2026-02-18T00:35:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.061094 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:22 crc kubenswrapper[4791]: E0218 00:35:22.061487 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.071688 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.071764 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.071789 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.071822 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.071848 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:22Z","lastTransitionTime":"2026-02-18T00:35:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.103438 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-17 23:00:52.475955027 +0000 UTC Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.174063 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.174130 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.174141 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.174175 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.174197 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:22Z","lastTransitionTime":"2026-02-18T00:35:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.276396 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.276436 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.276447 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.276464 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.276475 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:22Z","lastTransitionTime":"2026-02-18T00:35:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.379174 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.379217 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.379225 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.379239 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.379249 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:22Z","lastTransitionTime":"2026-02-18T00:35:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.481773 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.481832 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.481847 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.481865 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.481879 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:22Z","lastTransitionTime":"2026-02-18T00:35:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.584890 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.585186 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.585257 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.585324 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.585381 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:22Z","lastTransitionTime":"2026-02-18T00:35:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.688129 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.688429 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.688522 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.688599 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.688671 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:22Z","lastTransitionTime":"2026-02-18T00:35:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.791287 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.791574 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.791641 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.791707 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.791808 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:22Z","lastTransitionTime":"2026-02-18T00:35:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.893720 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.893769 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.893781 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.893800 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.893814 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:22Z","lastTransitionTime":"2026-02-18T00:35:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.995934 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.995982 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.995995 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.996016 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:22 crc kubenswrapper[4791]: I0218 00:35:22.996028 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:22Z","lastTransitionTime":"2026-02-18T00:35:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.060765 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.060833 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.060764 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:35:23 crc kubenswrapper[4791]: E0218 00:35:23.061242 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:35:23 crc kubenswrapper[4791]: E0218 00:35:23.061346 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:35:23 crc kubenswrapper[4791]: E0218 00:35:23.061440 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.098341 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.098576 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.098649 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.098723 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.098785 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:23Z","lastTransitionTime":"2026-02-18T00:35:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.103801 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-05 04:02:04.605786811 +0000 UTC Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.201216 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.201264 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.201275 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.201291 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.201302 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:23Z","lastTransitionTime":"2026-02-18T00:35:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.303496 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.303525 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.303535 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.303547 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.303555 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:23Z","lastTransitionTime":"2026-02-18T00:35:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.405242 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.405266 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.405274 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.405286 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.405294 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:23Z","lastTransitionTime":"2026-02-18T00:35:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.506417 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.506444 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.506453 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.506465 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.506475 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:23Z","lastTransitionTime":"2026-02-18T00:35:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.608776 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.608905 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.609008 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.609111 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.609232 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:23Z","lastTransitionTime":"2026-02-18T00:35:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.711091 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.711391 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.711467 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.711549 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.711636 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:23Z","lastTransitionTime":"2026-02-18T00:35:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.814394 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.814424 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.814433 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.814445 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.814456 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:23Z","lastTransitionTime":"2026-02-18T00:35:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.917606 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.917856 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.917939 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.918040 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:23 crc kubenswrapper[4791]: I0218 00:35:23.918122 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:23Z","lastTransitionTime":"2026-02-18T00:35:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.020820 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.020864 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.020873 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.020889 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.020898 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:24Z","lastTransitionTime":"2026-02-18T00:35:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.060408 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:24 crc kubenswrapper[4791]: E0218 00:35:24.060639 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.104410 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-17 00:59:34.496624153 +0000 UTC Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.123275 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.123533 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.123641 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.123734 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.123824 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:24Z","lastTransitionTime":"2026-02-18T00:35:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.229433 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.229498 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.229514 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.229593 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.229609 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:24Z","lastTransitionTime":"2026-02-18T00:35:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.332205 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.332248 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.332260 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.332278 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.332290 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:24Z","lastTransitionTime":"2026-02-18T00:35:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.434546 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.434585 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.434594 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.434607 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.434616 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:24Z","lastTransitionTime":"2026-02-18T00:35:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.536536 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.536575 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.536584 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.536599 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.536609 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:24Z","lastTransitionTime":"2026-02-18T00:35:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.638919 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.638959 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.638968 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.638981 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.638991 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:24Z","lastTransitionTime":"2026-02-18T00:35:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.741867 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.741937 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.741956 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.741980 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.741997 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:24Z","lastTransitionTime":"2026-02-18T00:35:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.844625 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.844754 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.844781 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.844810 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.844835 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:24Z","lastTransitionTime":"2026-02-18T00:35:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.947407 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.947511 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.947535 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.947615 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:24 crc kubenswrapper[4791]: I0218 00:35:24.947634 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:24Z","lastTransitionTime":"2026-02-18T00:35:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.049739 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.049779 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.049788 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.049804 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.049815 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:25Z","lastTransitionTime":"2026-02-18T00:35:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.061071 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.061106 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:35:25 crc kubenswrapper[4791]: E0218 00:35:25.061186 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.061223 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:35:25 crc kubenswrapper[4791]: E0218 00:35:25.061334 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:35:25 crc kubenswrapper[4791]: E0218 00:35:25.061431 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.105105 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 11:40:03.375150897 +0000 UTC Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.152520 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.152570 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.152579 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.152595 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.152604 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:25Z","lastTransitionTime":"2026-02-18T00:35:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.254325 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.254368 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.254380 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.254401 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.254410 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:25Z","lastTransitionTime":"2026-02-18T00:35:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.356999 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.357048 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.357061 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.357082 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.357098 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:25Z","lastTransitionTime":"2026-02-18T00:35:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.459082 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.459138 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.459148 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.459179 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.459187 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:25Z","lastTransitionTime":"2026-02-18T00:35:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.561393 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.561444 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.561456 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.561470 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.561478 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:25Z","lastTransitionTime":"2026-02-18T00:35:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.663952 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.663992 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.664003 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.664018 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.664031 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:25Z","lastTransitionTime":"2026-02-18T00:35:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.765967 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.765997 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.766006 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.766019 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.766027 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:25Z","lastTransitionTime":"2026-02-18T00:35:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.868509 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.868540 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.868548 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.868560 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.868569 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:25Z","lastTransitionTime":"2026-02-18T00:35:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.970857 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.970890 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.970898 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.970912 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:25 crc kubenswrapper[4791]: I0218 00:35:25.970923 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:25Z","lastTransitionTime":"2026-02-18T00:35:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.060452 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:26 crc kubenswrapper[4791]: E0218 00:35:26.060895 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.061106 4791 scope.go:117] "RemoveContainer" containerID="be8e36e9f03120ba567db12352eb82cd8116814e73026fe1466f3bc562fa4f29" Feb 18 00:35:26 crc kubenswrapper[4791]: E0218 00:35:26.061396 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-tr5hg_openshift-ovn-kubernetes(3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1)\"" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.072451 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.072481 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.072494 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.072508 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.072519 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:26Z","lastTransitionTime":"2026-02-18T00:35:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.105237 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-08 23:05:44.900754319 +0000 UTC Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.174775 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.174801 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.174810 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.174835 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.174844 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:26Z","lastTransitionTime":"2026-02-18T00:35:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.276972 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.277012 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.277022 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.277036 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.277045 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:26Z","lastTransitionTime":"2026-02-18T00:35:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.292859 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.292917 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.292931 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.292950 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.292964 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:26Z","lastTransitionTime":"2026-02-18T00:35:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:26 crc kubenswrapper[4791]: E0218 00:35:26.307414 4791 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"49ba9a08-9d87-4573-b809-fff0547601af\\\",\\\"systemUUID\\\":\\\"e1be6815-4e1d-4d7f-9918-bb66fef09f86\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:26Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.310595 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.310631 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.310640 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.310655 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.310669 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:26Z","lastTransitionTime":"2026-02-18T00:35:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:26 crc kubenswrapper[4791]: E0218 00:35:26.326107 4791 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"49ba9a08-9d87-4573-b809-fff0547601af\\\",\\\"systemUUID\\\":\\\"e1be6815-4e1d-4d7f-9918-bb66fef09f86\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:26Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.330075 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.330139 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.330181 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.330207 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.330226 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:26Z","lastTransitionTime":"2026-02-18T00:35:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:26 crc kubenswrapper[4791]: E0218 00:35:26.348178 4791 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"49ba9a08-9d87-4573-b809-fff0547601af\\\",\\\"systemUUID\\\":\\\"e1be6815-4e1d-4d7f-9918-bb66fef09f86\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:26Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.351583 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.351658 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.351682 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.351711 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.351734 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:26Z","lastTransitionTime":"2026-02-18T00:35:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:26 crc kubenswrapper[4791]: E0218 00:35:26.370666 4791 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"49ba9a08-9d87-4573-b809-fff0547601af\\\",\\\"systemUUID\\\":\\\"e1be6815-4e1d-4d7f-9918-bb66fef09f86\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:26Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.374178 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.374211 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.374222 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.374237 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.374268 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:26Z","lastTransitionTime":"2026-02-18T00:35:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:26 crc kubenswrapper[4791]: E0218 00:35:26.389978 4791 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"49ba9a08-9d87-4573-b809-fff0547601af\\\",\\\"systemUUID\\\":\\\"e1be6815-4e1d-4d7f-9918-bb66fef09f86\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:26Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:26 crc kubenswrapper[4791]: E0218 00:35:26.390125 4791 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.391296 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.391318 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.391329 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.391341 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.391352 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:26Z","lastTransitionTime":"2026-02-18T00:35:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.493969 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.494015 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.494024 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.494040 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.494051 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:26Z","lastTransitionTime":"2026-02-18T00:35:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.596313 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.596359 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.596371 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.596385 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.596395 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:26Z","lastTransitionTime":"2026-02-18T00:35:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.698486 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.698525 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.698535 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.698548 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.698558 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:26Z","lastTransitionTime":"2026-02-18T00:35:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.800259 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.800296 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.800306 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.800320 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.800329 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:26Z","lastTransitionTime":"2026-02-18T00:35:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.902192 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.902248 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.902264 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.902288 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:26 crc kubenswrapper[4791]: I0218 00:35:26.902300 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:26Z","lastTransitionTime":"2026-02-18T00:35:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.004574 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.004614 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.004622 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.004636 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.004645 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:27Z","lastTransitionTime":"2026-02-18T00:35:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.060360 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.060395 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.060471 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:35:27 crc kubenswrapper[4791]: E0218 00:35:27.060501 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:35:27 crc kubenswrapper[4791]: E0218 00:35:27.060585 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:35:27 crc kubenswrapper[4791]: E0218 00:35:27.060689 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.105900 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 09:11:30.47771715 +0000 UTC Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.107774 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.107818 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.107831 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.107850 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.107866 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:27Z","lastTransitionTime":"2026-02-18T00:35:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.210487 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.210540 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.210553 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.210572 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.210585 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:27Z","lastTransitionTime":"2026-02-18T00:35:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.312988 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.313026 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.313035 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.313047 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.313056 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:27Z","lastTransitionTime":"2026-02-18T00:35:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.414940 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.414984 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.414996 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.415014 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.415023 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:27Z","lastTransitionTime":"2026-02-18T00:35:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.516721 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.516946 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.517042 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.517110 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.517207 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:27Z","lastTransitionTime":"2026-02-18T00:35:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.619426 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.619476 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.619488 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.619505 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.619517 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:27Z","lastTransitionTime":"2026-02-18T00:35:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.721615 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.721655 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.721665 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.721681 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.721691 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:27Z","lastTransitionTime":"2026-02-18T00:35:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.824176 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.824225 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.824237 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.824257 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.824270 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:27Z","lastTransitionTime":"2026-02-18T00:35:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.926945 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.926991 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.927001 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.927016 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:27 crc kubenswrapper[4791]: I0218 00:35:27.927026 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:27Z","lastTransitionTime":"2026-02-18T00:35:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.029112 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.029177 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.029191 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.029209 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.029220 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:28Z","lastTransitionTime":"2026-02-18T00:35:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.060487 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:28 crc kubenswrapper[4791]: E0218 00:35:28.060594 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.106846 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-19 19:30:29.286945819 +0000 UTC Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.131448 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.131494 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.131510 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.131532 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.131565 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:28Z","lastTransitionTime":"2026-02-18T00:35:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.233830 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.233898 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.233906 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.233919 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.233930 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:28Z","lastTransitionTime":"2026-02-18T00:35:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.335924 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.335958 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.335967 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.335979 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.335988 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:28Z","lastTransitionTime":"2026-02-18T00:35:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.438479 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.438518 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.438530 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.438547 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.438559 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:28Z","lastTransitionTime":"2026-02-18T00:35:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.541512 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.541539 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.541547 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.541561 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.541570 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:28Z","lastTransitionTime":"2026-02-18T00:35:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.643475 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.643549 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.643574 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.643600 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.643617 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:28Z","lastTransitionTime":"2026-02-18T00:35:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.746503 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.746649 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.746667 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.746692 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.746709 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:28Z","lastTransitionTime":"2026-02-18T00:35:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.848485 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.848584 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.848605 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.848666 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.848686 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:28Z","lastTransitionTime":"2026-02-18T00:35:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.951390 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.951424 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.951435 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.951448 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:28 crc kubenswrapper[4791]: I0218 00:35:28.951459 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:28Z","lastTransitionTime":"2026-02-18T00:35:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.053046 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.053198 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.053227 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.053250 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.053275 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:29Z","lastTransitionTime":"2026-02-18T00:35:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.060489 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.060507 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.060553 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:35:29 crc kubenswrapper[4791]: E0218 00:35:29.060579 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:35:29 crc kubenswrapper[4791]: E0218 00:35:29.060693 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:35:29 crc kubenswrapper[4791]: E0218 00:35:29.060770 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.072584 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:29Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.084333 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:29Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.094908 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jq75l" err="failed to 
patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afcf9ee1-4224-441c-a98d-9330bed34065\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55h6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55h6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:35:00Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jq75l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:29Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.107619 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 18:29:19.762622258 +0000 UTC Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.109353 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5c41479d5089b48a439d4ea96fb7d33c0512e1c8a5d0c2c845eeda0eec6e789\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:29Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.122198 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:29Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.131983 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8295587f-c41f-4f35-bfad-bc54d58d047c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7227e5ab7b9fa0410752947b534340d7033f38af5ea7179f07078ec9e561ac2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://22a4a7ad6c6d5a8547c420da7822e0efd3617c4c3d44fb6aca96e019564d0e3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b98d760307fb74389d3eb9487cb79e6b24b2e8a4483c5c7d1f85073375bfce93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5fe785fada613c75ca071e74b230919a697ca990f310f4c07afbfe8fb2e6d4d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fe785fada613c75ca071e74b230919a697ca990f310f4c07afbfe8fb2e6d4d9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:29Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.141936 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready 
status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:29Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.151014 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b31a333-8f95-459c-8135-e91e557c4c85\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ebd0f5d677f875f5c5fe2b00c19a8a133c0524373b75be57ae4d9e4159183bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bhfmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:29Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.155011 4791 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.155045 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.155056 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.155074 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.155085 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:29Z","lastTransitionTime":"2026-02-18T00:35:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.163344 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb393ddf-cece-42f2-8d94-c88a3d536802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9485cfe0fe97814fa9ae31afb489a0e7eeea80a1ffb3501d473863a81ea8ed44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:51Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vnz85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:29Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.179393 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://be8e36e9f03120ba567db12352eb82cd8116814e
73026fe1466f3bc562fa4f29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://be8e36e9f03120ba567db12352eb82cd8116814e73026fe1466f3bc562fa4f29\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-18T00:35:13Z\\\",\\\"message\\\":\\\"] Retry successful for *v1.Pod openshift-dns/node-resolver-cg5l2 after 0 failed attempt(s)\\\\nI0218 00:35:12.890042 6469 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-cg5l2\\\\nF0218 00:35:12.889620 6469 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z]\\\\nI0218 00:35:12.890050 6469 base_network_controller_pods.go:916] Annotation values: ip=[10.217.0.3/23] ; mac=0a:58:0a:d9:00:03 ; gw=[10.217.0.1]\\\\nI0218 00:35:12.889327 6469 services_controller.go:443] Built service openshift-authentication-operator/metrics LB cluster-wide configs for networ\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:35:12Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-tr5hg_openshift-ovn-kubernetes(3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tr5hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:29Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.187383 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cq6jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a5f72cfc5bc6d0d3ce9a9b929eb8f81b9044509da4761c2b0c0cb0c78aa812c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4mlx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:47Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cq6jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:29Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.196257 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02f6ba8d-1be3-44e1-b1bf-21c188e59802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa464b32325197259edb9689c1d43c14a3f3144080177d3d33eb5ea4b79a3462\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eee7f98bfe1c0ed68a17d92c77a0a84f087691e6b789f13cb0c8f14de244f009\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\
\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7zq7r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:29Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.211545 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resource
s\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973add4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Complete
d\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:29Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.221680 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:29Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.231274 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:29Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.240634 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{
\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:29Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.250320 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-d2kpn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bdb769-59eb-4472-ba08-be5897ee2cd6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\
"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hg6c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-d2kpn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:29Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.256566 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.256597 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.256606 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.256621 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.256630 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:29Z","lastTransitionTime":"2026-02-18T00:35:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.259700 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-cg5l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aa04878bf8d787366f7a951ae7bb9daf6c4bdc56d79b0e487d137fda064c277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpnzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-cg5l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:29Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.358735 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.358777 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.358789 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.358803 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.358814 4791 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:29Z","lastTransitionTime":"2026-02-18T00:35:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.461176 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.461219 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.461229 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.461244 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.461252 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:29Z","lastTransitionTime":"2026-02-18T00:35:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.563249 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.563284 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.563295 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.563310 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.563319 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:29Z","lastTransitionTime":"2026-02-18T00:35:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.665042 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.665074 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.665084 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.665100 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.665111 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:29Z","lastTransitionTime":"2026-02-18T00:35:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.766600 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.766632 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.766640 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.766653 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.766662 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:29Z","lastTransitionTime":"2026-02-18T00:35:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.868730 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.868763 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.868773 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.868788 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.868796 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:29Z","lastTransitionTime":"2026-02-18T00:35:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.971218 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.971280 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.971297 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.971320 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:29 crc kubenswrapper[4791]: I0218 00:35:29.971337 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:29Z","lastTransitionTime":"2026-02-18T00:35:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.060466 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:30 crc kubenswrapper[4791]: E0218 00:35:30.060670 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.074806 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.074854 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.074867 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.074884 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.074901 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:30Z","lastTransitionTime":"2026-02-18T00:35:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.108226 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-20 06:00:27.167827763 +0000 UTC Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.177012 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.177074 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.177093 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.177119 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.177137 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:30Z","lastTransitionTime":"2026-02-18T00:35:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.280283 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.280320 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.280329 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.280365 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.280375 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:30Z","lastTransitionTime":"2026-02-18T00:35:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.383089 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.383140 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.383171 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.383194 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.383209 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:30Z","lastTransitionTime":"2026-02-18T00:35:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.486197 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.486278 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.486301 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.486332 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.486355 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:30Z","lastTransitionTime":"2026-02-18T00:35:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.589098 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.589151 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.589192 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.589215 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.589232 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:30Z","lastTransitionTime":"2026-02-18T00:35:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.692196 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.692241 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.692258 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.692280 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.692299 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:30Z","lastTransitionTime":"2026-02-18T00:35:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.794980 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.795028 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.795042 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.795065 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.795081 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:30Z","lastTransitionTime":"2026-02-18T00:35:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.897193 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.897224 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.897236 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.897250 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.897261 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:30Z","lastTransitionTime":"2026-02-18T00:35:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.999201 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.999241 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.999249 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.999266 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:30 crc kubenswrapper[4791]: I0218 00:35:30.999275 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:30Z","lastTransitionTime":"2026-02-18T00:35:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:31 crc kubenswrapper[4791]: I0218 00:35:31.060794 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:35:31 crc kubenswrapper[4791]: I0218 00:35:31.060820 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:35:31 crc kubenswrapper[4791]: I0218 00:35:31.060876 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:35:31 crc kubenswrapper[4791]: E0218 00:35:31.060967 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:35:31 crc kubenswrapper[4791]: E0218 00:35:31.061180 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:35:31 crc kubenswrapper[4791]: E0218 00:35:31.061136 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:35:31 crc kubenswrapper[4791]: I0218 00:35:31.101535 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:31 crc kubenswrapper[4791]: I0218 00:35:31.101610 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:31 crc kubenswrapper[4791]: I0218 00:35:31.101632 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:31 crc kubenswrapper[4791]: I0218 00:35:31.101660 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:31 crc kubenswrapper[4791]: I0218 00:35:31.101684 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:31Z","lastTransitionTime":"2026-02-18T00:35:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:31 crc kubenswrapper[4791]: I0218 00:35:31.108901 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-23 02:49:02.153851448 +0000 UTC Feb 18 00:35:31 crc kubenswrapper[4791]: I0218 00:35:31.203989 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:31 crc kubenswrapper[4791]: I0218 00:35:31.204059 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:31 crc kubenswrapper[4791]: I0218 00:35:31.204081 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:31 crc kubenswrapper[4791]: I0218 00:35:31.204108 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:31 crc kubenswrapper[4791]: I0218 00:35:31.204129 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:31Z","lastTransitionTime":"2026-02-18T00:35:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:31 crc kubenswrapper[4791]: I0218 00:35:31.306392 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:31 crc kubenswrapper[4791]: I0218 00:35:31.306438 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:31 crc kubenswrapper[4791]: I0218 00:35:31.306452 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:31 crc kubenswrapper[4791]: I0218 00:35:31.306467 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:31 crc kubenswrapper[4791]: I0218 00:35:31.306476 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:31Z","lastTransitionTime":"2026-02-18T00:35:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:31 crc kubenswrapper[4791]: I0218 00:35:31.722523 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:31 crc kubenswrapper[4791]: I0218 00:35:31.722561 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:31 crc kubenswrapper[4791]: I0218 00:35:31.722572 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:31 crc kubenswrapper[4791]: I0218 00:35:31.722603 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:31 crc kubenswrapper[4791]: I0218 00:35:31.722616 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:31Z","lastTransitionTime":"2026-02-18T00:35:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:31 crc kubenswrapper[4791]: I0218 00:35:31.825488 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:31 crc kubenswrapper[4791]: I0218 00:35:31.825531 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:31 crc kubenswrapper[4791]: I0218 00:35:31.825540 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:31 crc kubenswrapper[4791]: I0218 00:35:31.825554 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:31 crc kubenswrapper[4791]: I0218 00:35:31.825564 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:31Z","lastTransitionTime":"2026-02-18T00:35:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:31 crc kubenswrapper[4791]: I0218 00:35:31.927631 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:31 crc kubenswrapper[4791]: I0218 00:35:31.927678 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:31 crc kubenswrapper[4791]: I0218 00:35:31.927687 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:31 crc kubenswrapper[4791]: I0218 00:35:31.927703 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:31 crc kubenswrapper[4791]: I0218 00:35:31.927712 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:31Z","lastTransitionTime":"2026-02-18T00:35:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.029866 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.029926 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.029943 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.029968 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.029987 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:32Z","lastTransitionTime":"2026-02-18T00:35:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.060226 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:32 crc kubenswrapper[4791]: E0218 00:35:32.060438 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.109508 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 12:16:06.704300161 +0000 UTC Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.132518 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.132549 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.132560 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.132574 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.132582 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:32Z","lastTransitionTime":"2026-02-18T00:35:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.235575 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.235628 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.235639 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.235654 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.235666 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:32Z","lastTransitionTime":"2026-02-18T00:35:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.337894 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.337938 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.337948 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.337964 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.337975 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:32Z","lastTransitionTime":"2026-02-18T00:35:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.383123 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/afcf9ee1-4224-441c-a98d-9330bed34065-metrics-certs\") pod \"network-metrics-daemon-jq75l\" (UID: \"afcf9ee1-4224-441c-a98d-9330bed34065\") " pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:32 crc kubenswrapper[4791]: E0218 00:35:32.383561 4791 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Feb 18 00:35:32 crc kubenswrapper[4791]: E0218 00:35:32.383664 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/afcf9ee1-4224-441c-a98d-9330bed34065-metrics-certs podName:afcf9ee1-4224-441c-a98d-9330bed34065 nodeName:}" failed. No retries permitted until 2026-02-18 00:36:04.383634415 +0000 UTC m=+105.951647595 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/afcf9ee1-4224-441c-a98d-9330bed34065-metrics-certs") pod "network-metrics-daemon-jq75l" (UID: "afcf9ee1-4224-441c-a98d-9330bed34065") : object "openshift-multus"/"metrics-daemon-secret" not registered Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.440012 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.440055 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.440068 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.440086 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.440098 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:32Z","lastTransitionTime":"2026-02-18T00:35:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.543132 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.543252 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.543268 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.543288 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.543302 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:32Z","lastTransitionTime":"2026-02-18T00:35:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.645687 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.645717 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.645726 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.645739 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.645748 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:32Z","lastTransitionTime":"2026-02-18T00:35:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.748396 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.748436 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.748447 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.748465 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.748477 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:32Z","lastTransitionTime":"2026-02-18T00:35:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.851431 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.851507 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.851526 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.851552 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.851573 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:32Z","lastTransitionTime":"2026-02-18T00:35:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.953705 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.953747 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.953755 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.953770 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:32 crc kubenswrapper[4791]: I0218 00:35:32.953779 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:32Z","lastTransitionTime":"2026-02-18T00:35:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.056772 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.056838 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.056850 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.056869 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.056881 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:33Z","lastTransitionTime":"2026-02-18T00:35:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.061067 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.061101 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.061066 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:35:33 crc kubenswrapper[4791]: E0218 00:35:33.061193 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:35:33 crc kubenswrapper[4791]: E0218 00:35:33.061290 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:35:33 crc kubenswrapper[4791]: E0218 00:35:33.061352 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.109811 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 17:42:02.607266931 +0000 UTC Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.159203 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.159244 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.159254 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.159273 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.159284 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:33Z","lastTransitionTime":"2026-02-18T00:35:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.261837 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.261874 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.261885 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.261900 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.261910 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:33Z","lastTransitionTime":"2026-02-18T00:35:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.363994 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.364038 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.364050 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.364068 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.364081 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:33Z","lastTransitionTime":"2026-02-18T00:35:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.466202 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.466253 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.466264 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.466284 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.466297 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:33Z","lastTransitionTime":"2026-02-18T00:35:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.568243 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.568286 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.568298 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.568313 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.568324 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:33Z","lastTransitionTime":"2026-02-18T00:35:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.671032 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.671094 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.671116 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.671141 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.671186 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:33Z","lastTransitionTime":"2026-02-18T00:35:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.730363 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-d2kpn_83bdb769-59eb-4472-ba08-be5897ee2cd6/kube-multus/0.log" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.730420 4791 generic.go:334] "Generic (PLEG): container finished" podID="83bdb769-59eb-4472-ba08-be5897ee2cd6" containerID="773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26" exitCode=1 Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.730455 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-d2kpn" event={"ID":"83bdb769-59eb-4472-ba08-be5897ee2cd6","Type":"ContainerDied","Data":"773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26"} Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.730851 4791 scope.go:117] "RemoveContainer" containerID="773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.748571 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":
{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5c41479d5089b48a439d4ea96fb7d33c0512e1c8a5d0c2c845eeda0eec6e789\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' 
detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:33Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.766700 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:33Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.774416 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.774467 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.774487 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.774511 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.774530 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:33Z","lastTransitionTime":"2026-02-18T00:35:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.783732 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8295587f-c41f-4f35-bfad-bc54d58d047c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7227e5ab7b9fa0410752947b534340d7033f38af5ea7179f07078ec9e561ac2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://22a4a7ad6c6d5a8547c420da7822e0efd3617c4c3d44fb6aca96e019564d0e3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b98d760307fb74389d3eb9487cb79e6b24b2e8a4483c5c7d1f85073375bfce93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5fe785fada613c75ca071e74b230919a697ca990f310f4c07afbfe8fb2e6d4d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fe785fada613c75ca071e74b230919a697ca990f310f4c07afbfe8fb2e6d4d9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:33Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.800225 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:33Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.814776 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b31a333-8f95-459c-8135-e91e557c4c85\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ebd0f5d677f875f5c5fe2b00c19a8a133c0524373b75be57ae4d9e4159183bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bhfmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:33Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.826775 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jq75l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afcf9ee1-4224-441c-a98d-9330bed34065\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55h6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55h6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:35:00Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jq75l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:33Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.846656 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb393ddf-cece-42f2-8d94-c88a3d536802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9485cfe0fe97814fa9ae31afb489a0e7eeea80a1ffb3501d473863a81ea8ed44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vnz85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:33Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.863555 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://be8e36e9f03120ba567db12352eb82cd8116814e73026fe1466f3bc562fa4f29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://be8e36e9f03120ba567db12352eb82cd8116814e73026fe1466f3bc562fa4f29\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-18T00:35:13Z\\\",\\\"message\\\":\\\"] Retry successful for *v1.Pod openshift-dns/node-resolver-cg5l2 after 0 failed attempt(s)\\\\nI0218 00:35:12.890042 6469 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-cg5l2\\\\nF0218 00:35:12.889620 6469 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z]\\\\nI0218 00:35:12.890050 6469 base_network_controller_pods.go:916] Annotation values: ip=[10.217.0.3/23] ; mac=0a:58:0a:d9:00:03 ; gw=[10.217.0.1]\\\\nI0218 00:35:12.889327 6469 services_controller.go:443] Built service openshift-authentication-operator/metrics LB cluster-wide configs for networ\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:35:12Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-tr5hg_openshift-ovn-kubernetes(3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tr5hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:33Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.882909 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.882991 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.883007 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.883027 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.883040 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:33Z","lastTransitionTime":"2026-02-18T00:35:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.889211 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973add4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:33Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.907081 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:33Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.923056 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:33Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.938907 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{
\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:33Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.956336 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-d2kpn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bdb769-59eb-4472-ba08-be5897ee2cd6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-18T00:35:33Z\\\",\\\"message\\\":\\\"2026-02-18T00:34:47+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_901bd898-ae6d-42ed-9223-074d9c86238a\\\\n2026-02-18T00:34:47+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_901bd898-ae6d-42ed-9223-074d9c86238a to /host/opt/cni/bin/\\\\n2026-02-18T00:34:48Z [verbose] multus-daemon started\\\\n2026-02-18T00:34:48Z [verbose] Readiness Indicator file check\\\\n2026-02-18T00:35:33Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hg6c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-d2kpn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:33Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.968410 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-cg5l2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aa04878bf8d787366f7a951ae7bb9daf6c4bdc56d79b0e487d137fda064c277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpnzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-cg5l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:33Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.982488 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cq6jj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a5f72cfc5bc6d0d3ce9a9b929eb8f81b9044509da4761c2b0c0cb0c78aa812c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4mlx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:47Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cq6jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:33Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.986238 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.986291 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.986310 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.986353 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:33 crc kubenswrapper[4791]: I0218 00:35:33.986375 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:33Z","lastTransitionTime":"2026-02-18T00:35:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.003612 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02f6ba8d-1be3-44e1-b1bf-21c188e59802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa464b32325197259edb9689c1d43c14a3f3144080177d3d33eb5ea4b79a3462\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eee7f98bfe1c0ed68a17d92c77a0a84f087691e6b789f13cb0c8f14de244f009\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:58Z\\\"}}\" 
for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7zq7r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:33Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.036890 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:34Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.054779 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:34Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.061003 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:34 crc kubenswrapper[4791]: E0218 00:35:34.061139 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.088634 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.088663 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.088681 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.088699 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.088709 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:34Z","lastTransitionTime":"2026-02-18T00:35:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.110287 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 12:32:53.681756467 +0000 UTC Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.191778 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.191803 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.191812 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.191824 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.191833 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:34Z","lastTransitionTime":"2026-02-18T00:35:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.293742 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.293807 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.293824 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.293851 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.293868 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:34Z","lastTransitionTime":"2026-02-18T00:35:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.396435 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.396483 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.396495 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.396512 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.396524 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:34Z","lastTransitionTime":"2026-02-18T00:35:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.498952 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.498999 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.499011 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.499028 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.499040 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:34Z","lastTransitionTime":"2026-02-18T00:35:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.601071 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.601113 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.601122 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.601137 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.601147 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:34Z","lastTransitionTime":"2026-02-18T00:35:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.703594 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.703628 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.703638 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.703652 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.703663 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:34Z","lastTransitionTime":"2026-02-18T00:35:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.735200 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-d2kpn_83bdb769-59eb-4472-ba08-be5897ee2cd6/kube-multus/0.log" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.735249 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-d2kpn" event={"ID":"83bdb769-59eb-4472-ba08-be5897ee2cd6","Type":"ContainerStarted","Data":"63aa39cbb146d0f470c77f1b8ae9dd29616913e05a79de889b8e684b40621099"} Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.752471 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:34Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.765937 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:34Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.780084 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to 
patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5c41479d5089b48a439d4ea96fb7d33c0512e1c8a5d0c2c845eeda0eec6e789\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-ap
iserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:34Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.792111 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:34Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.806736 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.806795 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.806812 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.806831 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.806845 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:34Z","lastTransitionTime":"2026-02-18T00:35:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.807626 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8295587f-c41f-4f35-bfad-bc54d58d047c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7227e5ab7b9fa0410752947b534340d7033f38af5ea7179f07078ec9e561ac2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://22a4a7ad6c6d5a8547c420da7822e0efd3617c4c3d44fb6aca96e019564d0e3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b98d760307fb74389d3eb9487cb79e6b24b2e8a4483c5c7d1f85073375bfce93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5fe785fada613c75ca071e74b230919a697ca990f310f4c07afbfe8fb2e6d4d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fe785fada613c75ca071e74b230919a697ca990f310f4c07afbfe8fb2e6d4d9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:34Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.819664 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:34Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.832286 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b31a333-8f95-459c-8135-e91e557c4c85\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ebd0f5d677f875f5c5fe2b00c19a8a133c0524373b75be57ae4d9e4159183bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bhfmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:34Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.844784 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jq75l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afcf9ee1-4224-441c-a98d-9330bed34065\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55h6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55h6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:35:00Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jq75l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:34Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.861405 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb393ddf-cece-42f2-8d94-c88a3d536802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9485cfe0fe97814fa9ae31afb489a0e7eeea80a1ffb3501d473863a81ea8ed44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vnz85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:34Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.889727 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://be8e36e9f03120ba567db12352eb82cd8116814e73026fe1466f3bc562fa4f29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://be8e36e9f03120ba567db12352eb82cd8116814e73026fe1466f3bc562fa4f29\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-18T00:35:13Z\\\",\\\"message\\\":\\\"] Retry successful for *v1.Pod openshift-dns/node-resolver-cg5l2 after 0 failed attempt(s)\\\\nI0218 00:35:12.890042 6469 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-cg5l2\\\\nF0218 00:35:12.889620 6469 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z]\\\\nI0218 00:35:12.890050 6469 base_network_controller_pods.go:916] Annotation values: ip=[10.217.0.3/23] ; mac=0a:58:0a:d9:00:03 ; gw=[10.217.0.1]\\\\nI0218 00:35:12.889327 6469 services_controller.go:443] Built service openshift-authentication-operator/metrics LB cluster-wide configs for networ\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:35:12Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-tr5hg_openshift-ovn-kubernetes(3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tr5hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:34Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.902113 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02f6ba8d-1be3-44e1-b1bf-21c188e59802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa464b32325197259edb9689c1d43c14a3f3144080177d3d33eb5ea4b79a3462\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eee7f98bfe1c0ed68a17d92c77a0a84f087691e6b789f13cb0c8f14de244f009\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7zq7r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:34Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.909550 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.909588 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.909600 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.909617 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.909630 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:34Z","lastTransitionTime":"2026-02-18T00:35:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.934449 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973add4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:34Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.951740 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:34Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.964703 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:34Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.978233 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{
\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:34Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:34 crc kubenswrapper[4791]: I0218 00:35:34.993406 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-d2kpn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bdb769-59eb-4472-ba08-be5897ee2cd6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63aa39cbb146d0f470c77f1b8ae9dd29616913e05a79de889b8e684b40621099\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-18T00:35:33Z\\\",\\\"message\\\":\\\"2026-02-18T00:34:47+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_901bd898-ae6d-42ed-9223-074d9c86238a\\\\n2026-02-18T00:34:47+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_901bd898-ae6d-42ed-9223-074d9c86238a to /host/opt/cni/bin/\\\\n2026-02-18T00:34:48Z [verbose] multus-daemon started\\\\n2026-02-18T00:34:48Z [verbose] Readiness Indicator file check\\\\n2026-02-18T00:35:33Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hg6c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-d2kpn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:34Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.003525 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-cg5l2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aa04878bf8d787366f7a951ae7bb9daf6c4bdc56d79b0e487d137fda064c277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpnzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-cg5l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:35Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.012099 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.012138 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.012150 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.012191 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.012203 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:35Z","lastTransitionTime":"2026-02-18T00:35:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.018719 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cq6jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a5f72cfc5bc6d0d3ce9a9b929eb8f81b9044509da4761c2b0c0cb0c78aa812c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4mlx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:47Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cq6jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:35Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.060917 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.060964 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.060922 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:35:35 crc kubenswrapper[4791]: E0218 00:35:35.061059 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:35:35 crc kubenswrapper[4791]: E0218 00:35:35.061292 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:35:35 crc kubenswrapper[4791]: E0218 00:35:35.061421 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.111268 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-01 07:53:40.636826969 +0000 UTC Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.117856 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.117886 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.117895 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.117909 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.117920 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:35Z","lastTransitionTime":"2026-02-18T00:35:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.221331 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.221435 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.221461 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.221492 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.221516 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:35Z","lastTransitionTime":"2026-02-18T00:35:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.323961 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.324015 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.324027 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.324045 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.324057 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:35Z","lastTransitionTime":"2026-02-18T00:35:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.425930 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.425989 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.426005 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.426025 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.426040 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:35Z","lastTransitionTime":"2026-02-18T00:35:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.528593 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.528634 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.528645 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.528680 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.528694 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:35Z","lastTransitionTime":"2026-02-18T00:35:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.631756 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.631809 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.631825 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.631846 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.631861 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:35Z","lastTransitionTime":"2026-02-18T00:35:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.734459 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.734485 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.734500 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.734515 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.734524 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:35Z","lastTransitionTime":"2026-02-18T00:35:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.837390 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.837443 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.837461 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.837484 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.837502 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:35Z","lastTransitionTime":"2026-02-18T00:35:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.940485 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.940549 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.940570 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.940599 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:35 crc kubenswrapper[4791]: I0218 00:35:35.940620 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:35Z","lastTransitionTime":"2026-02-18T00:35:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.043616 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.043690 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.043714 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.043743 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.043766 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:36Z","lastTransitionTime":"2026-02-18T00:35:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.061153 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:36 crc kubenswrapper[4791]: E0218 00:35:36.061386 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.111669 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-17 07:06:03.864997799 +0000 UTC Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.145282 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.145326 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.145344 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.145364 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.145378 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:36Z","lastTransitionTime":"2026-02-18T00:35:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.248210 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.248279 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.248294 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.248311 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.248323 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:36Z","lastTransitionTime":"2026-02-18T00:35:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.351392 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.351452 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.351468 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.351492 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.351510 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:36Z","lastTransitionTime":"2026-02-18T00:35:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.454435 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.454464 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.454472 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.454485 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.454493 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:36Z","lastTransitionTime":"2026-02-18T00:35:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.556802 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.556866 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.556882 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.556905 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.556922 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:36Z","lastTransitionTime":"2026-02-18T00:35:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.659634 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.659676 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.659688 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.659706 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.659718 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:36Z","lastTransitionTime":"2026-02-18T00:35:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.711683 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.711726 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.711739 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.711755 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.711769 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:36Z","lastTransitionTime":"2026-02-18T00:35:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:36 crc kubenswrapper[4791]: E0218 00:35:36.725719 4791 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"49ba9a08-9d87-4573-b809-fff0547601af\\\",\\\"systemUUID\\\":\\\"e1be6815-4e1d-4d7f-9918-bb66fef09f86\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:36Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.729560 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.729637 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.729660 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.729682 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.729696 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:36Z","lastTransitionTime":"2026-02-18T00:35:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:36 crc kubenswrapper[4791]: E0218 00:35:36.742572 4791 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"49ba9a08-9d87-4573-b809-fff0547601af\\\",\\\"systemUUID\\\":\\\"e1be6815-4e1d-4d7f-9918-bb66fef09f86\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:36Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.746140 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.746203 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.746218 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.746236 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.746251 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:36Z","lastTransitionTime":"2026-02-18T00:35:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:36 crc kubenswrapper[4791]: E0218 00:35:36.757566 4791 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"49ba9a08-9d87-4573-b809-fff0547601af\\\",\\\"systemUUID\\\":\\\"e1be6815-4e1d-4d7f-9918-bb66fef09f86\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:36Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.761149 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.761212 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.761225 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.761241 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.761252 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:36Z","lastTransitionTime":"2026-02-18T00:35:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:36 crc kubenswrapper[4791]: E0218 00:35:36.773672 4791 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"49ba9a08-9d87-4573-b809-fff0547601af\\\",\\\"systemUUID\\\":\\\"e1be6815-4e1d-4d7f-9918-bb66fef09f86\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:36Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.776616 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.776658 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.776672 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.776690 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.776704 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:36Z","lastTransitionTime":"2026-02-18T00:35:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:36 crc kubenswrapper[4791]: E0218 00:35:36.788210 4791 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:36Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"49ba9a08-9d87-4573-b809-fff0547601af\\\",\\\"systemUUID\\\":\\\"e1be6815-4e1d-4d7f-9918-bb66fef09f86\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:36Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:36 crc kubenswrapper[4791]: E0218 00:35:36.788354 4791 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.789794 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.789832 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.789843 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.789861 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.789874 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:36Z","lastTransitionTime":"2026-02-18T00:35:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.896392 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.897343 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.897385 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.897407 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:36 crc kubenswrapper[4791]: I0218 00:35:36.897420 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:36Z","lastTransitionTime":"2026-02-18T00:35:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.000295 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.000363 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.000382 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.000409 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.000426 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:37Z","lastTransitionTime":"2026-02-18T00:35:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.061064 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.061268 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:35:37 crc kubenswrapper[4791]: E0218 00:35:37.061345 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.061381 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:35:37 crc kubenswrapper[4791]: E0218 00:35:37.061463 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:35:37 crc kubenswrapper[4791]: E0218 00:35:37.061565 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.103575 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.103657 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.103672 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.103693 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.103705 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:37Z","lastTransitionTime":"2026-02-18T00:35:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.111839 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 21:39:03.569129539 +0000 UTC Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.205912 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.205983 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.206008 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.206038 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.206061 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:37Z","lastTransitionTime":"2026-02-18T00:35:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.308249 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.308291 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.308302 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.308317 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.308330 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:37Z","lastTransitionTime":"2026-02-18T00:35:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.410209 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.410252 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.410262 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.410279 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.410288 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:37Z","lastTransitionTime":"2026-02-18T00:35:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.513086 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.513211 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.513240 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.513271 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.513303 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:37Z","lastTransitionTime":"2026-02-18T00:35:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.615621 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.615683 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.615717 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.615748 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.615770 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:37Z","lastTransitionTime":"2026-02-18T00:35:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.718658 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.718713 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.718727 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.718745 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.718758 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:37Z","lastTransitionTime":"2026-02-18T00:35:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.821348 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.821395 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.821409 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.821428 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.821443 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:37Z","lastTransitionTime":"2026-02-18T00:35:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.923984 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.924031 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.924039 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.924053 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:37 crc kubenswrapper[4791]: I0218 00:35:37.924062 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:37Z","lastTransitionTime":"2026-02-18T00:35:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.027378 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.027432 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.027450 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.027469 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.027485 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:38Z","lastTransitionTime":"2026-02-18T00:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.060837 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:38 crc kubenswrapper[4791]: E0218 00:35:38.060996 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.112228 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 03:23:56.781316625 +0000 UTC Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.130288 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.130334 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.130349 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.130367 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.130379 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:38Z","lastTransitionTime":"2026-02-18T00:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.232623 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.232671 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.232682 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.232700 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.232715 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:38Z","lastTransitionTime":"2026-02-18T00:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.335070 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.335145 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.335225 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.335266 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.335292 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:38Z","lastTransitionTime":"2026-02-18T00:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.437757 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.437810 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.437821 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.437839 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.437850 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:38Z","lastTransitionTime":"2026-02-18T00:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.540631 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.540670 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.540680 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.540698 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.540709 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:38Z","lastTransitionTime":"2026-02-18T00:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.643881 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.643945 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.643968 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.643998 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.644018 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:38Z","lastTransitionTime":"2026-02-18T00:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.746150 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.746248 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.746266 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.746292 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.746317 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:38Z","lastTransitionTime":"2026-02-18T00:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.849100 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.849151 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.849182 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.849200 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.849211 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:38Z","lastTransitionTime":"2026-02-18T00:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.951470 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.951522 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.951538 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.951558 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:38 crc kubenswrapper[4791]: I0218 00:35:38.951585 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:38Z","lastTransitionTime":"2026-02-18T00:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.053877 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.053910 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.053921 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.053934 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.053943 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:39Z","lastTransitionTime":"2026-02-18T00:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.060308 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:35:39 crc kubenswrapper[4791]: E0218 00:35:39.060402 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.060502 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.060530 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:35:39 crc kubenswrapper[4791]: E0218 00:35:39.060678 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:35:39 crc kubenswrapper[4791]: E0218 00:35:39.061017 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.061341 4791 scope.go:117] "RemoveContainer" containerID="be8e36e9f03120ba567db12352eb82cd8116814e73026fe1466f3bc562fa4f29" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.070720 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.080825 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\
\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"moun
tPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://be8e36e9f03120ba567db12352eb82cd8116814e73026fe1466f3bc562fa4f29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://be8e36e9f03120ba567db12352eb82cd8116814e73026fe1466f3bc562fa4f29\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-18T00:35:13Z\\\",\\\"message\\\":\\\"] Retry successful for *v1.Pod openshift-dns/node-resolver-cg5l2 after 0 failed attempt(s)\\\\nI0218 00:35:12.890042 6469 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-cg5l2\\\\nF0218 00:35:12.889620 6469 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z]\\\\nI0218 00:35:12.890050 6469 base_network_controller_pods.go:916] Annotation values: ip=[10.217.0.3/23] ; mac=0a:58:0a:d9:00:03 ; gw=[10.217.0.1]\\\\nI0218 00:35:12.889327 6469 
services_controller.go:443] Built service openshift-authentication-operator/metrics LB cluster-wide configs for networ\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:35:12Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-tr5hg_openshift-ovn-kubernetes(3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secr
ets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tr5hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:39Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.102797 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb393ddf-cece-42f2-8d94-c88a3d536802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9485cfe0fe97814fa9ae31afb489a0e7eeea80a1ffb3501d473863a81ea8ed44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vnz85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:39Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.112728 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 07:39:15.929242366 +0000 UTC Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.115926 4791 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:39Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.127681 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:39Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.139185 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:39Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.151778 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-d2kpn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bdb769-59eb-4472-ba08-be5897ee2cd6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63aa39cbb146d0f470c77f1b8ae9dd29616913e05a79de889b8e684b40621099\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-18T00:35:33Z\\\",\\\"message\\\":\\\"2026-02-18T00:34:47+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_901bd898-ae6d-42ed-9223-074d9c86238a\\\\n2026-02-18T00:34:47+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_901bd898-ae6d-42ed-9223-074d9c86238a to /host/opt/cni/bin/\\\\n2026-02-18T00:34:48Z [verbose] multus-daemon started\\\\n2026-02-18T00:34:48Z [verbose] Readiness Indicator file check\\\\n2026-02-18T00:35:33Z [error] have you checked that your default network is ready? 
still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hg6c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-d2kpn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:39Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.159716 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.159746 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.159753 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.159766 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.159789 4791 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:39Z","lastTransitionTime":"2026-02-18T00:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.161774 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-cg5l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aa04878bf8d787366f7a951ae7bb9daf6c4bdc56d79b0e487d137fda064c277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpnzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-cg5l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:39Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.172361 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cq6jj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a5f72cfc5bc6d0d3ce9a9b929eb8f81b9044509da4761c2b0c0cb0c78aa812c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4mlx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:47Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cq6jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:39Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.183781 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"02f6ba8d-1be3-44e1-b1bf-21c188e59802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa464b32325197259edb9689c1d43c14a3f3144080177d3d33eb5ea4b79a3462\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eee7f98bfe1c0ed68a17d92c77a0a84f087691e6b789f13cb0c8f14de244f009\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7zq7r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:39Z is after 2025-08-24T17:21:41Z" Feb 18 
00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.200980 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"lo
g-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973add4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reas
on\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:39Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.211811 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:39Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.221586 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:39Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.239125 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5c41479d5089b48a439d4ea96fb7d33c0512e1c8a5d0c2c845eeda0eec6e789\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 
secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:39Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.250940 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:39Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.260589 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8295587f-c41f-4f35-bfad-bc54d58d047c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7227e5ab7b9fa0410752947b534340d7033f38af5ea7179f07078ec9e561ac2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://22a4a7ad6c6d5a8547c420da7822e0efd3617c4c3d44fb6aca96e019564d0e3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b98d760307fb74389d3eb9487cb79e6b24b2e8a4483c5c7d1f85073375bfce93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5fe785fada613c75ca071e74b230919a697ca990f310f4c07afbfe8fb2e6d4d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fe785fada613c75ca071e74b230919a697ca990f310f4c07afbfe8fb2e6d4d9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:39Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.262562 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.262582 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.262589 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.262603 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 
00:35:39.262612 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:39Z","lastTransitionTime":"2026-02-18T00:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.271576 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:39Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.284190 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b31a333-8f95-459c-8135-e91e557c4c85\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ebd0f5d677f875f5c5fe2b00c19a8a133c0524373b75be57ae4d9e4159183bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bhfmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:39Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.301553 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jq75l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afcf9ee1-4224-441c-a98d-9330bed34065\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55h6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55h6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:35:00Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jq75l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:39Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.364908 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.364949 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.364961 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.364975 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.364986 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:39Z","lastTransitionTime":"2026-02-18T00:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.468240 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.468268 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.468276 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.468288 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.468298 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:39Z","lastTransitionTime":"2026-02-18T00:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.570375 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.570408 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.570416 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.570431 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.570441 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:39Z","lastTransitionTime":"2026-02-18T00:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.672673 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.672719 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.672728 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.672741 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.672751 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:39Z","lastTransitionTime":"2026-02-18T00:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.750563 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tr5hg_3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1/ovnkube-controller/2.log" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.753360 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" event={"ID":"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1","Type":"ContainerStarted","Data":"cc0c28173c82f93e7859b1e377355478f6d13cfa268519e0d647485149938264"} Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.753843 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.766743 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:39Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.775223 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.775333 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.775348 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.775376 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.775390 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:39Z","lastTransitionTime":"2026-02-18T00:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.779583 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:39Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.788770 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b31a333-8f95-459c-8135-e91e557c4c85\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ebd0f5d677f875f5c5fe2b00c19a8a133c0524373b75be57ae4d9e4159183bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bhfmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:39Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.798537 4791 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-jq75l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afcf9ee1-4224-441c-a98d-9330bed34065\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55h6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55h6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:35:00Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jq75l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:39Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.808210 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f5e90b2-1379-4a83-8139-11ef1558945e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5a695212b2ac3fbc5ae5b16df68a833130025c3c7aeb7e1ea2e02f36ee3b96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f7bf77d75850f01ea3e9d8ee0ca3e949c3f95d621437ec1a893cd1dc5c58916\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f7bf77d75850f01ea3e9d8ee0ca3e949c3f95d621437ec1a893cd1dc5c58916\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:39Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.826569 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5c41479d5089b48a439d4ea96fb7d33c0512e1c8a5d0c2c845eeda0eec6e789\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:39Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.839560 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:39Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.855064 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8295587f-c41f-4f35-bfad-bc54d58d047c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7227e5ab7b9fa0410752947b534340d7033f38af5ea7179f07078ec9e561ac2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://22a4a7ad6c6d5a8547c420da7822e0efd3617c4c3d44fb6aca96e019564d0e3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b98d760307fb74389d3eb9487cb79e6b24b2e8a4483c5c7d1f85073375bfce93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5fe785fada613c75ca071e74b230919a697ca990f310f4c07afbfe8fb2e6d4d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fe785fada613c75ca071e74b230919a697ca990f310f4c07afbfe8fb2e6d4d9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:39Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.875151 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready 
status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:39Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.877588 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.877804 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.877814 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.877829 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.877837 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:39Z","lastTransitionTime":"2026-02-18T00:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.891207 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb393ddf-cece-42f2-8d94-c88a3d536802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9485cfe0fe97814fa9ae31afb489a0e7eeea80a1ffb3501d473863a81ea8ed44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vnz85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:39Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.908769 4791 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36c
dd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-con
troller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc0c28173c82f93e7859b1e377355478f6d13cfa268519e0d647485149938264\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://be8e36e9f03120ba567db12352eb82cd8116814e73026fe1466f3bc562fa4f29\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-18T00:35:13Z\\\",\\\"message\\\":\\\"] Retry successful for *v1.Pod openshift-dns/node-resolver-cg5l2 after 0 failed attempt(s)\\\\nI0218 00:35:12.890042 6469 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-cg5l2\\\\nF0218 00:35:12.889620 6469 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z]\\\\nI0218 00:35:12.890050 6469 base_network_controller_pods.go:916] Annotation values: ip=[10.217.0.3/23] ; mac=0a:58:0a:d9:00:03 ; gw=[10.217.0.1]\\\\nI0218 00:35:12.889327 6469 services_controller.go:443] Built service openshift-authentication-operator/metrics LB cluster-wide configs for 
networ\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:35:12Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tr5hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:39Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.919173 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-cg5l2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aa04878bf8d787366f7a951ae7bb9daf6c4bdc56d79b0e487d137fda064c277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpnzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-cg5l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:39Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.929051 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cq6jj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a5f72cfc5bc6d0d3ce9a9b929eb8f81b9044509da4761c2b0c0cb0c78aa812c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4mlx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:47Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cq6jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:39Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.939386 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"02f6ba8d-1be3-44e1-b1bf-21c188e59802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa464b32325197259edb9689c1d43c14a3f3144080177d3d33eb5ea4b79a3462\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eee7f98bfe1c0ed68a17d92c77a0a84f087691e6b789f13cb0c8f14de244f009\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7zq7r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:39Z is after 2025-08-24T17:21:41Z" Feb 18 
00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.958175 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"lo
g-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973add4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reas
on\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:39Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.970403 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:39Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.980905 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.980963 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.980977 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.980998 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.981011 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:39Z","lastTransitionTime":"2026-02-18T00:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.981926 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:39Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:39 crc kubenswrapper[4791]: I0218 00:35:39.993727 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:39Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.005331 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-d2kpn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bdb769-59eb-4472-ba08-be5897ee2cd6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63aa39cbb146d0f470c77f1b8ae9dd29616913e05a79de889b8e684b40621099\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-18T00:35:33Z\\\",\\\"message\\\":\\\"2026-02-18T00:34:47+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_901bd898-ae6d-42ed-9223-074d9c86238a\\\\n2026-02-18T00:34:47+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_901bd898-ae6d-42ed-9223-074d9c86238a to /host/opt/cni/bin/\\\\n2026-02-18T00:34:48Z [verbose] multus-daemon started\\\\n2026-02-18T00:34:48Z [verbose] Readiness Indicator file check\\\\n2026-02-18T00:35:33Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hg6c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-d2kpn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:40Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.061070 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:40 crc kubenswrapper[4791]: E0218 00:35:40.061255 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.083545 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.083598 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.083610 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.083626 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.083636 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:40Z","lastTransitionTime":"2026-02-18T00:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.113704 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-01 11:57:52.799316531 +0000 UTC Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.186078 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.186108 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.186118 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.186132 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.186141 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:40Z","lastTransitionTime":"2026-02-18T00:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.288541 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.288583 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.288594 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.288609 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.288621 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:40Z","lastTransitionTime":"2026-02-18T00:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.390631 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.390665 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.390673 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.390687 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.390696 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:40Z","lastTransitionTime":"2026-02-18T00:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.493014 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.493055 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.493066 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.493091 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.493103 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:40Z","lastTransitionTime":"2026-02-18T00:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.595554 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.595595 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.595603 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.595619 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.595629 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:40Z","lastTransitionTime":"2026-02-18T00:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.699013 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.699656 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.699844 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.700035 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.700222 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:40Z","lastTransitionTime":"2026-02-18T00:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.759800 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tr5hg_3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1/ovnkube-controller/3.log" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.760899 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tr5hg_3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1/ovnkube-controller/2.log" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.766828 4791 generic.go:334] "Generic (PLEG): container finished" podID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerID="cc0c28173c82f93e7859b1e377355478f6d13cfa268519e0d647485149938264" exitCode=1 Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.766895 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" event={"ID":"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1","Type":"ContainerDied","Data":"cc0c28173c82f93e7859b1e377355478f6d13cfa268519e0d647485149938264"} Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.766951 4791 scope.go:117] "RemoveContainer" containerID="be8e36e9f03120ba567db12352eb82cd8116814e73026fe1466f3bc562fa4f29" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.768486 4791 scope.go:117] "RemoveContainer" containerID="cc0c28173c82f93e7859b1e377355478f6d13cfa268519e0d647485149938264" Feb 18 00:35:40 crc kubenswrapper[4791]: E0218 00:35:40.768869 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-tr5hg_openshift-ovn-kubernetes(3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1)\"" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.785491 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cq6jj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a5f72cfc5bc6d0d3ce9a9b929eb8f81b9044509da4761c2b0c0cb0c78aa812c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4mlx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:47Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cq6jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:40Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.804334 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.804389 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.804412 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.804440 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.804465 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:40Z","lastTransitionTime":"2026-02-18T00:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.809858 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02f6ba8d-1be3-44e1-b1bf-21c188e59802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa464b32325197259edb9689c1d43c14a3f3144080177d3d33eb5ea4b79a3462\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eee7f98bfe1c0ed68a17d92c77a0a84f087691e6b789f13cb0c8f14de244f009\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:58Z\\\"}}\" 
for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7zq7r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:40Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.846249 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973add4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b
7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:40Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.869957 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:40Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.891509 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:40Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.907802 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.907869 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.907894 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.907922 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.907944 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:40Z","lastTransitionTime":"2026-02-18T00:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.911402 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:40Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.929388 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-d2kpn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bdb769-59eb-4472-ba08-be5897ee2cd6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63aa39cbb146d0f470c77f1b8ae9dd29616913e05a79de889b8e684b40621099\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-18T00:35:33Z\\\",\\\"message\\\":\\\"2026-02-18T00:34:47+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_901bd898-ae6d-42ed-9223-074d9c86238a\\\\n2026-02-18T00:34:47+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_901bd898-ae6d-42ed-9223-074d9c86238a to /host/opt/cni/bin/\\\\n2026-02-18T00:34:48Z [verbose] multus-daemon started\\\\n2026-02-18T00:34:48Z [verbose] Readiness Indicator file check\\\\n2026-02-18T00:35:33Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hg6c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-d2kpn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:40Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.943131 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-cg5l2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aa04878bf8d787366f7a951ae7bb9daf6c4bdc56d79b0e487d137fda064c277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpnzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-cg5l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:40Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.956400 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:40Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.968398 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:40Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.980418 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jq75l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afcf9ee1-4224-441c-a98d-9330bed34065\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55h6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55h6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:35:00Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jq75l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:40Z is 
after 2025-08-24T17:21:41Z" Feb 18 00:35:40 crc kubenswrapper[4791]: I0218 00:35:40.991839 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f5e90b2-1379-4a83-8139-11ef1558945e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5a695212b2ac3fbc5ae5b16df68a833130025c3c7aeb7e1ea2e02f36ee3b96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f7bf77d75850f01ea3e9d8ee0ca3e949c3f95d621437ec1a893cd1dc5c58916\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f7bf77d75850f01ea3e9d8ee0ca3e949c3f95d621437ec1a893cd1dc5c58916\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:40Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.010757 4791 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.010807 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.010821 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.010840 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.010852 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:41Z","lastTransitionTime":"2026-02-18T00:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.011479 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver
-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5c41479d5089b48a439d4ea96fb7d33c0512e1c8a5d0c2c845eeda0eec6e789\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' 
detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.025477 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.038719 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8295587f-c41f-4f35-bfad-bc54d58d047c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7227e5ab7b9fa0410752947b534340d7033f38af5ea7179f07078ec9e561ac2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://22a4a7ad6c6d5a8547c420da7822e0efd3617c4c3d44fb6aca96e019564d0e3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b98d760307fb74389d3eb9487cb79e6b24b2e8a4483c5c7d1f85073375bfce93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5fe785fada613c75ca071e74b230919a697ca990f310f4c07afbfe8fb2e6d4d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fe785fada613c75ca071e74b230919a697ca990f310f4c07afbfe8fb2e6d4d9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.052911 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready 
status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.060621 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.060698 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.060621 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:35:41 crc kubenswrapper[4791]: E0218 00:35:41.060820 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:35:41 crc kubenswrapper[4791]: E0218 00:35:41.061037 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:35:41 crc kubenswrapper[4791]: E0218 00:35:41.061275 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.064302 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b31a333-8f95-459c-8135-e91e557c4c85\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ebd0f5d677f875f5c5fe2b00c19a8a133c0524373b75be57ae4d9e4159183bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":tru
e,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bhfmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.097378 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb393ddf-cece-42f2-8d94-c88a3d536802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9485cfe0fe97814fa9ae31afb489a0e7eeea80a1ffb3501d473863a81ea8ed44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d
0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Runnin
g\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vnz85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.113553 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.113589 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.113599 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.113613 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.113624 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:41Z","lastTransitionTime":"2026-02-18T00:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.114133 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-06 08:40:00.677254837 +0000 UTC Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.122180 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc0c28173c82f93e7859b1e377355478f6d13cfa
268519e0d647485149938264\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://be8e36e9f03120ba567db12352eb82cd8116814e73026fe1466f3bc562fa4f29\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-18T00:35:13Z\\\",\\\"message\\\":\\\"] Retry successful for *v1.Pod openshift-dns/node-resolver-cg5l2 after 0 failed attempt(s)\\\\nI0218 00:35:12.890042 6469 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-cg5l2\\\\nF0218 00:35:12.889620 6469 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:12Z is after 2025-08-24T17:21:41Z]\\\\nI0218 00:35:12.890050 6469 base_network_controller_pods.go:916] Annotation values: ip=[10.217.0.3/23] ; mac=0a:58:0a:d9:00:03 ; gw=[10.217.0.1]\\\\nI0218 00:35:12.889327 6469 services_controller.go:443] Built service openshift-authentication-operator/metrics LB cluster-wide configs for networ\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:35:12Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc0c28173c82f93e7859b1e377355478f6d13cfa268519e0d647485149938264\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-18T00:35:40Z\\\",\\\"message\\\":\\\"ult: []services.lbConfig{services.lbConfig{vips:[]string{\\\\\\\"10.217.5.93\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:5000, clusterEndpoints:services.lbEndpoints{Port:0, V4IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}}\\\\nI0218 00:35:40.142040 6856 lb_config.go:1031] Cluster endpoints for openshift-kube-apiserver-operator/metrics for network=default are: map[]\\\\nF0218 00:35:40.142043 6856 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post 
\\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:40Z is after 2025-08-24T17:21:41Z]\\\\nI0218 00:35:40.142048 6856 services_controller.go:443\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\"
:[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tr5hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.216262 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.216302 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.216312 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.216326 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.216335 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:41Z","lastTransitionTime":"2026-02-18T00:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.319059 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.319105 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.319116 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.319133 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.319145 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:41Z","lastTransitionTime":"2026-02-18T00:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.423070 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.423109 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.423118 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.423132 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.423142 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:41Z","lastTransitionTime":"2026-02-18T00:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.527513 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.527557 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.527566 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.527581 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.527591 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:41Z","lastTransitionTime":"2026-02-18T00:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.630613 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.630678 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.630697 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.630721 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.630739 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:41Z","lastTransitionTime":"2026-02-18T00:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.733868 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.733921 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.733931 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.733947 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.733956 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:41Z","lastTransitionTime":"2026-02-18T00:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.771872 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tr5hg_3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1/ovnkube-controller/3.log" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.776152 4791 scope.go:117] "RemoveContainer" containerID="cc0c28173c82f93e7859b1e377355478f6d13cfa268519e0d647485149938264" Feb 18 00:35:41 crc kubenswrapper[4791]: E0218 00:35:41.776494 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-tr5hg_openshift-ovn-kubernetes(3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1)\"" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.786894 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-cg5l2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aa04878bf8d787366f7a951ae7bb9daf6c4bdc56d79b0e487d137fda064c277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpnzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-cg5l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:41Z is after 
2025-08-24T17:21:41Z" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.796561 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cq6jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a5f72cfc5bc6d0d3ce9a9b929eb8f81b9044509da4761c2b0c0cb0c78aa812c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4mlx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:47Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cq6jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.809242 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"02f6ba8d-1be3-44e1-b1bf-21c188e59802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa464b32325197259edb9689c1d43c14a3f3144080177d3d33eb5ea4b79a3462\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eee7f98bfe1c0ed68a17d92c77a0a84f087691e6b789f13cb0c8f14de244f009\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7zq7r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:41Z is after 2025-08-24T17:21:41Z" Feb 18 
00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.829849 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"lo
g-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973add4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reas
on\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.836757 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.836803 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.836820 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.836845 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.836863 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:41Z","lastTransitionTime":"2026-02-18T00:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.843222 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.856209 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.869434 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.883184 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-d2kpn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bdb769-59eb-4472-ba08-be5897ee2cd6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63aa39cbb146d0f470c77f1b8ae9dd29616913e05a79de889b8e684b40621099\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-18T00:35:33Z\\\",\\\"message\\\":\\\"2026-02-18T00:34:47+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_901bd898-ae6d-42ed-9223-074d9c86238a\\\\n2026-02-18T00:34:47+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_901bd898-ae6d-42ed-9223-074d9c86238a to /host/opt/cni/bin/\\\\n2026-02-18T00:34:48Z [verbose] multus-daemon started\\\\n2026-02-18T00:34:48Z [verbose] Readiness Indicator file check\\\\n2026-02-18T00:35:33Z [error] have you checked that your default network is ready? 
still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hg6c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-d2kpn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.895605 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.909518 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.919493 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b31a333-8f95-459c-8135-e91e557c4c85\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ebd0f5d677f875f5c5fe2b00c19a8a133c0524373b75be57ae4d9e4159183bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bhfmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.931700 4791 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-jq75l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afcf9ee1-4224-441c-a98d-9330bed34065\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55h6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55h6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:35:00Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jq75l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.939850 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.939903 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 
00:35:41.939914 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.939930 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.939939 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:41Z","lastTransitionTime":"2026-02-18T00:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.943978 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f5e90b2-1379-4a83-8139-11ef1558945e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5a695212b2ac3fbc5ae5b16df68a833130025c3c7aeb7e1ea2e02f36ee3b96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f7bf77d75850f01ea3e9d8ee0ca3e949c3f95d621437ec1a893cd1dc5c58916\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f7bf77d75850f01ea3e9d8ee0ca3e949c3f95d621437ec1a893cd1dc5c58916\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02
-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:41 crc kubenswrapper[4791]: I0218 00:35:41.956547 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/op
enshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5c41479d5089b48a439d4ea96fb7d33c0512e1c8a5d0c2c845eeda0eec6e789\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:41.970142 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:41.981765 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8295587f-c41f-4f35-bfad-bc54d58d047c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7227e5ab7b9fa0410752947b534340d7033f38af5ea7179f07078ec9e561ac2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://22a4a7ad6c6d5a8547c420da7822e0efd3617c4c3d44fb6aca96e019564d0e3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b98d760307fb74389d3eb9487cb79e6b24b2e8a4483c5c7d1f85073375bfce93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5fe785fada613c75ca071e74b230919a697ca990f310f4c07afbfe8fb2e6d4d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fe785fada613c75ca071e74b230919a697ca990f310f4c07afbfe8fb2e6d4d9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:41.994305 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready 
status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:41Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.010140 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb393ddf-cece-42f2-8d94-c88a3d536802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9485cfe0fe97814fa9ae31afb489a0e7eeea80a1ffb3501d473863a81ea8ed44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vnz85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:42Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.031446 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc0c28173c82f93e7859b1e377355478f6d13cfa268519e0d647485149938264\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc0c28173c82f93e7859b1e377355478f6d13cfa268519e0d647485149938264\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-18T00:35:40Z\\\",\\\"message\\\":\\\"ult: []services.lbConfig{services.lbConfig{vips:[]string{\\\\\\\"10.217.5.93\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:5000, clusterEndpoints:services.lbEndpoints{Port:0, V4IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}}\\\\nI0218 00:35:40.142040 6856 lb_config.go:1031] Cluster endpoints for openshift-kube-apiserver-operator/metrics for network=default are: map[]\\\\nF0218 00:35:40.142043 6856 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:40Z is after 2025-08-24T17:21:41Z]\\\\nI0218 00:35:40.142048 6856 services_controller.go:443\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:35:39Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-tr5hg_openshift-ovn-kubernetes(3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tr5hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:42Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.041921 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.041945 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.041953 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.041966 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.041976 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:42Z","lastTransitionTime":"2026-02-18T00:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.060315 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:42 crc kubenswrapper[4791]: E0218 00:35:42.060414 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.114747 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 11:41:42.455153788 +0000 UTC Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.144777 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.144874 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.144900 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.144946 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.144971 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:42Z","lastTransitionTime":"2026-02-18T00:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.247073 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.247117 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.247138 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.247197 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.247223 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:42Z","lastTransitionTime":"2026-02-18T00:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.350538 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.350601 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.350623 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.350651 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.350673 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:42Z","lastTransitionTime":"2026-02-18T00:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.453142 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.453226 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.453244 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.453271 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.453289 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:42Z","lastTransitionTime":"2026-02-18T00:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.555882 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.555942 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.555962 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.555989 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.556008 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:42Z","lastTransitionTime":"2026-02-18T00:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.658489 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.658538 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.658552 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.658570 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.658580 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:42Z","lastTransitionTime":"2026-02-18T00:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.762083 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.762190 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.762210 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.762235 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.762253 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:42Z","lastTransitionTime":"2026-02-18T00:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.864935 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.865004 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.865027 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.865058 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.865083 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:42Z","lastTransitionTime":"2026-02-18T00:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.968122 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.968237 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.968256 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.968280 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:42 crc kubenswrapper[4791]: I0218 00:35:42.968296 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:42Z","lastTransitionTime":"2026-02-18T00:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.002797 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.002931 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.002960 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.002996 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.003014 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:35:43 crc kubenswrapper[4791]: E0218 00:35:43.003126 4791 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object 
"openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 18 00:35:43 crc kubenswrapper[4791]: E0218 00:35:43.003123 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:47.00308542 +0000 UTC m=+148.571098630 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:35:43 crc kubenswrapper[4791]: E0218 00:35:43.003125 4791 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Feb 18 00:35:43 crc kubenswrapper[4791]: E0218 00:35:43.003231 4791 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 18 00:35:43 crc kubenswrapper[4791]: E0218 00:35:43.003253 4791 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 18 00:35:43 crc kubenswrapper[4791]: E0218 00:35:43.003273 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-18 00:36:47.003254445 +0000 UTC m=+148.571267655 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Feb 18 00:35:43 crc kubenswrapper[4791]: E0218 00:35:43.003267 4791 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 18 00:35:43 crc kubenswrapper[4791]: E0218 00:35:43.003403 4791 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 18 00:35:43 crc kubenswrapper[4791]: E0218 00:35:43.003141 4791 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 18 00:35:43 crc kubenswrapper[4791]: E0218 00:35:43.003458 4791 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 18 00:35:43 crc kubenswrapper[4791]: E0218 00:35:43.003373 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-18 00:36:47.003344477 +0000 UTC m=+148.571357777 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 18 00:35:43 crc kubenswrapper[4791]: E0218 00:35:43.003489 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-02-18 00:36:47.003478192 +0000 UTC m=+148.571491502 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 18 00:35:43 crc kubenswrapper[4791]: E0218 00:35:43.003506 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-02-18 00:36:47.003498632 +0000 UTC m=+148.571511933 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.061039 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.061080 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.061083 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:35:43 crc kubenswrapper[4791]: E0218 00:35:43.061253 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:35:43 crc kubenswrapper[4791]: E0218 00:35:43.061337 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:35:43 crc kubenswrapper[4791]: E0218 00:35:43.061424 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.069791 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.069840 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.069856 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.069881 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.069898 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:43Z","lastTransitionTime":"2026-02-18T00:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.115567 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-17 07:15:36.893911754 +0000 UTC Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.172918 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.172965 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.172975 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.172989 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.173000 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:43Z","lastTransitionTime":"2026-02-18T00:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.275835 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.276053 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.276176 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.276251 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.276315 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:43Z","lastTransitionTime":"2026-02-18T00:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.379734 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.379788 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.379806 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.379840 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.379859 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:43Z","lastTransitionTime":"2026-02-18T00:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.482744 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.482834 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.482855 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.482884 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.482903 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:43Z","lastTransitionTime":"2026-02-18T00:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.585634 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.585992 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.586010 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.586027 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.586038 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:43Z","lastTransitionTime":"2026-02-18T00:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.689063 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.689109 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.689121 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.689138 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.689151 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:43Z","lastTransitionTime":"2026-02-18T00:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.792568 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.792613 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.792628 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.792645 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.792657 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:43Z","lastTransitionTime":"2026-02-18T00:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.895340 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.895411 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.895435 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.895464 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.895488 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:43Z","lastTransitionTime":"2026-02-18T00:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.999422 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.999491 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.999508 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.999534 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:43 crc kubenswrapper[4791]: I0218 00:35:43.999551 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:43Z","lastTransitionTime":"2026-02-18T00:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.060414 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:44 crc kubenswrapper[4791]: E0218 00:35:44.060629 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.102920 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.102989 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.103011 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.103042 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.103061 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:44Z","lastTransitionTime":"2026-02-18T00:35:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.116805 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 15:58:40.390273707 +0000 UTC Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.205702 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.205751 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.205766 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.205785 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.205817 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:44Z","lastTransitionTime":"2026-02-18T00:35:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.307621 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.307663 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.307676 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.307696 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.307710 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:44Z","lastTransitionTime":"2026-02-18T00:35:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.410591 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.410641 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.410652 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.410670 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.410682 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:44Z","lastTransitionTime":"2026-02-18T00:35:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.514054 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.514127 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.514151 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.514258 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.514283 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:44Z","lastTransitionTime":"2026-02-18T00:35:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.616697 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.616754 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.616775 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.616807 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.616829 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:44Z","lastTransitionTime":"2026-02-18T00:35:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.719859 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.719918 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.719935 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.719958 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.719976 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:44Z","lastTransitionTime":"2026-02-18T00:35:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.823809 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.823913 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.823934 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.823968 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.823988 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:44Z","lastTransitionTime":"2026-02-18T00:35:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.927130 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.927270 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.927289 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.927322 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:44 crc kubenswrapper[4791]: I0218 00:35:44.927345 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:44Z","lastTransitionTime":"2026-02-18T00:35:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.030819 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.030907 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.030916 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.030934 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.030944 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:45Z","lastTransitionTime":"2026-02-18T00:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.060478 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.060490 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:35:45 crc kubenswrapper[4791]: E0218 00:35:45.060603 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.060629 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:35:45 crc kubenswrapper[4791]: E0218 00:35:45.060908 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:35:45 crc kubenswrapper[4791]: E0218 00:35:45.060958 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.117582 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 22:38:56.579341771 +0000 UTC Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.133353 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.133418 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.133436 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.133468 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.133492 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:45Z","lastTransitionTime":"2026-02-18T00:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.236454 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.236525 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.236546 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.236625 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.236648 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:45Z","lastTransitionTime":"2026-02-18T00:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.339579 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.339658 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.339679 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.339709 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.339732 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:45Z","lastTransitionTime":"2026-02-18T00:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.442114 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.442176 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.442201 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.442224 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.442235 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:45Z","lastTransitionTime":"2026-02-18T00:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.544888 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.544947 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.544963 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.544989 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.545005 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:45Z","lastTransitionTime":"2026-02-18T00:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.647648 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.647726 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.647744 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.647795 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.647812 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:45Z","lastTransitionTime":"2026-02-18T00:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.750829 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.750898 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.750918 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.750944 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.750964 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:45Z","lastTransitionTime":"2026-02-18T00:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.852698 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.852734 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.852742 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.852754 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.852763 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:45Z","lastTransitionTime":"2026-02-18T00:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.955075 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.955130 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.955148 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.955191 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:45 crc kubenswrapper[4791]: I0218 00:35:45.955208 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:45Z","lastTransitionTime":"2026-02-18T00:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.057965 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.058007 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.058019 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.058035 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.058052 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:46Z","lastTransitionTime":"2026-02-18T00:35:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.060377 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:46 crc kubenswrapper[4791]: E0218 00:35:46.060497 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.117900 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-14 17:48:57.490482094 +0000 UTC Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.160364 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.160401 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.160411 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.160426 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.160438 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:46Z","lastTransitionTime":"2026-02-18T00:35:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.263625 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.263695 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.263716 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.263743 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.263763 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:46Z","lastTransitionTime":"2026-02-18T00:35:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.366783 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.366829 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.366845 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.366867 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.366883 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:46Z","lastTransitionTime":"2026-02-18T00:35:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.469539 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.469588 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.469606 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.469629 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.469649 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:46Z","lastTransitionTime":"2026-02-18T00:35:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.573076 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.573133 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.573154 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.573206 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.573224 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:46Z","lastTransitionTime":"2026-02-18T00:35:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.675863 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.675929 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.675947 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.675972 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.675991 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:46Z","lastTransitionTime":"2026-02-18T00:35:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.778956 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.779013 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.779030 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.779053 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.779071 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:46Z","lastTransitionTime":"2026-02-18T00:35:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.882223 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.882288 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.882304 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.882327 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.882344 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:46Z","lastTransitionTime":"2026-02-18T00:35:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.984919 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.984982 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.985004 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.985034 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:46 crc kubenswrapper[4791]: I0218 00:35:46.985062 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:46Z","lastTransitionTime":"2026-02-18T00:35:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.060709 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.060729 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:35:47 crc kubenswrapper[4791]: E0218 00:35:47.060883 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:35:47 crc kubenswrapper[4791]: E0218 00:35:47.061004 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.061075 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:35:47 crc kubenswrapper[4791]: E0218 00:35:47.061221 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.087576 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.087628 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.087645 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.087667 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.087683 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:47Z","lastTransitionTime":"2026-02-18T00:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.119055 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 06:50:34.031384035 +0000 UTC Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.151196 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.151249 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.151266 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.151288 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.151304 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:47Z","lastTransitionTime":"2026-02-18T00:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:47 crc kubenswrapper[4791]: E0218 00:35:47.171055 4791 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"49ba9a08-9d87-4573-b809-fff0547601af\\\",\\\"systemUUID\\\":\\\"e1be6815-4e1d-4d7f-9918-bb66fef09f86\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.176331 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.176393 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.176418 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.176445 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.176466 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:47Z","lastTransitionTime":"2026-02-18T00:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:47 crc kubenswrapper[4791]: E0218 00:35:47.199745 4791 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"49ba9a08-9d87-4573-b809-fff0547601af\\\",\\\"systemUUID\\\":\\\"e1be6815-4e1d-4d7f-9918-bb66fef09f86\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.208388 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.208461 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.208484 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.208553 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.208578 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:47Z","lastTransitionTime":"2026-02-18T00:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:47 crc kubenswrapper[4791]: E0218 00:35:47.229252 4791 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"49ba9a08-9d87-4573-b809-fff0547601af\\\",\\\"systemUUID\\\":\\\"e1be6815-4e1d-4d7f-9918-bb66fef09f86\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.232081 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.232194 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.232221 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.232252 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.232280 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:47Z","lastTransitionTime":"2026-02-18T00:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:47 crc kubenswrapper[4791]: E0218 00:35:47.251741 4791 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"49ba9a08-9d87-4573-b809-fff0547601af\\\",\\\"systemUUID\\\":\\\"e1be6815-4e1d-4d7f-9918-bb66fef09f86\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.255268 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.255298 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.255307 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.255322 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.255333 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:47Z","lastTransitionTime":"2026-02-18T00:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:47 crc kubenswrapper[4791]: E0218 00:35:47.267329 4791 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:47Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"49ba9a08-9d87-4573-b809-fff0547601af\\\",\\\"systemUUID\\\":\\\"e1be6815-4e1d-4d7f-9918-bb66fef09f86\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:47Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:47 crc kubenswrapper[4791]: E0218 00:35:47.267438 4791 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.268777 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.268800 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.268809 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.268820 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.268828 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:47Z","lastTransitionTime":"2026-02-18T00:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.370767 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.370796 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.370806 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.370819 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.370827 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:47Z","lastTransitionTime":"2026-02-18T00:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.473102 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.473136 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.473145 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.473178 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.473191 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:47Z","lastTransitionTime":"2026-02-18T00:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.576520 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.576563 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.576574 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.576590 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.576601 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:47Z","lastTransitionTime":"2026-02-18T00:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.679214 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.679254 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.679265 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.679282 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.679295 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:47Z","lastTransitionTime":"2026-02-18T00:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.781999 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.782073 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.782098 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.782134 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.782198 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:47Z","lastTransitionTime":"2026-02-18T00:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.885013 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.885081 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.885103 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.885134 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.885201 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:47Z","lastTransitionTime":"2026-02-18T00:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.987356 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.987422 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.987441 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.987466 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:47 crc kubenswrapper[4791]: I0218 00:35:47.987483 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:47Z","lastTransitionTime":"2026-02-18T00:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.060513 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:48 crc kubenswrapper[4791]: E0218 00:35:48.060678 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.090065 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.090118 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.090129 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.090148 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.090185 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:48Z","lastTransitionTime":"2026-02-18T00:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.119825 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-07 09:06:35.064986484 +0000 UTC Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.192807 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.192852 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.192864 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.192883 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.192896 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:48Z","lastTransitionTime":"2026-02-18T00:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.296361 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.296450 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.296475 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.296506 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.296529 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:48Z","lastTransitionTime":"2026-02-18T00:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.399662 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.399706 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.399714 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.399728 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.399740 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:48Z","lastTransitionTime":"2026-02-18T00:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.502635 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.502703 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.502721 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.502746 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.502762 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:48Z","lastTransitionTime":"2026-02-18T00:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.605068 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.605105 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.605115 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.605131 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.605141 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:48Z","lastTransitionTime":"2026-02-18T00:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.707361 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.707406 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.707416 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.707431 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.707439 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:48Z","lastTransitionTime":"2026-02-18T00:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.810248 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.810277 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.810285 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.810300 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.810309 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:48Z","lastTransitionTime":"2026-02-18T00:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.914333 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.914916 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.915451 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.915664 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:48 crc kubenswrapper[4791]: I0218 00:35:48.916735 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:48Z","lastTransitionTime":"2026-02-18T00:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.019631 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.019680 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.019694 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.019716 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.019728 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:49Z","lastTransitionTime":"2026-02-18T00:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.060639 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.060684 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.060653 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:35:49 crc kubenswrapper[4791]: E0218 00:35:49.060808 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:35:49 crc kubenswrapper[4791]: E0218 00:35:49.060951 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:35:49 crc kubenswrapper[4791]: E0218 00:35:49.061047 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.078039 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a87683607fc8e6f28f33e54c12306340482f88e756735f12e90546e21fab5348\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0638b93c03b9d8d3ce702679c72973f80297da2bc1b7cb7686e35a92aeaccf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\
\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.095984 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-d2kpn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83bdb769-59eb-4472-ba08-be5897ee2cd6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://63aa39cbb146d0f470c77f1b8ae9dd29616913e05a79de889b8e684b40621099\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-18T00:35:33Z\\\",\\\"message\\\":\\\"2026-02-18T00:34:47+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_901bd898-ae6d-42ed-9223-074d9c86238a\\\\n2026-02-18T00:34:47+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_901bd898-ae6d-42ed-9223-074d9c86238a to /host/opt/cni/bin/\\\\n2026-02-18T00:34:48Z [verbose] multus-daemon started\\\\n2026-02-18T00:34:48Z [verbose] Readiness Indicator file check\\\\n2026-02-18T00:35:33Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hg6c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-d2kpn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.116687 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-cg5l2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cee2e3b0-8c1a-4fe7-86ba-1e54cbdd33e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7aa04878bf8d787366f7a951ae7bb9daf6c4bdc56d79b0e487d137fda064c277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpnzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-cg5l2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.119999 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-24 18:52:15.982602997 +0000 UTC Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.121819 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.121858 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.121871 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.121887 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.121899 4791 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:49Z","lastTransitionTime":"2026-02-18T00:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.130617 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cq6jj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e5ba5a-400b-42ef-bc1a-f98f45a2e227\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a5f72cfc5bc6d0d3ce9a9b929eb8f81b9044509da4761c2b0c0cb0c78aa812c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4mlx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:47Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cq6jj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.145922 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"02f6ba8d-1be3-44e1-b1bf-21c188e59802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa464b32325197259edb9689c1d43c14a3f3144080177d3d33eb5ea4b79a3462\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eee7f98bfe1c0ed68a17d92c77a0a84f087691e6b789f13cb0c8f14de244f009\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c4sc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:58Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7zq7r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:49Z is after 2025-08-24T17:21:41Z" Feb 18 
00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.179532 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c6429fb-ad2a-4a2b-a36a-683a0ece97a5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2395b5ade6cfee70f172dc93b0793da906689b4e85af0c2a8a686cbbe9cc26c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b6c64c12ec9b44c0b84b5cb5681187a077746a5748438eda9aba0ec340ec62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4821c3e7bdd407f7f7438e67100dadeec0b5a81b97faae4b004e99fa01ee4ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"lo
g-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca2770a6efebbd823d23803d7ab2e95dc0973add4cd4773852d8c0865cb2ff81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bbafc8aa212e3e790067c269a33699d6746956887e9f4cd98f4801f4e3f7c28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6efe5cb5d5f9376bd349ca651687f1351776c014b12a33d590deaaadcc02c968\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2cac98b45b5aac0f55a878660d55fdc72256a7dc13d7ec073031e6f493a53fe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"reas
on\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4e5e24bed9f254bfb4c4531756acd9f0084d633e42f3d5ddde920f8db7496dc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.200726 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e73685182640ca6f57dfe831ecac9031ce6707035e97d795b667dc0bce94502\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.221512 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.224987 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.225030 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.225047 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.225071 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.225090 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:49Z","lastTransitionTime":"2026-02-18T00:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.239205 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.251714 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa79a4e65cb9ac0f9ed5b6c0332e8d1a223af6bcc9733397f2f8f664e34b787\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.263179 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8295587f-c41f-4f35-bfad-bc54d58d047c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7227e5ab7b9fa0410752947b534340d7033f38af5ea7179f07078ec9e561ac2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://22a4a7ad6c6d5a8547c420da7822e0efd3617c4c3d44fb6aca96e019564d0e3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b98d760307fb74389d3eb9487cb79e6b24b2e8a4483c5c7d1f85073375bfce93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5fe785fada613c75ca071e74b230919a697ca990f310f4c07afbfe8fb2e6d4d9\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fe785fada613c75ca071e74b230919a697ca990f310f4c07afbfe8fb2e6d4d9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.273594 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.284834 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0b31a333-8f95-459c-8135-e91e557c4c85\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ebd0f5d677f875f5c5fe2b00c19a8a133c0524373b75be57ae4d9e4159183bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cxtwt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-bhfmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.294812 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jq75l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afcf9ee1-4224-441c-a98d-9330bed34065\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55h6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55h6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:35:00Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jq75l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.304585 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f5e90b2-1379-4a83-8139-11ef1558945e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5a695212b2ac3fbc5ae5b16df68a833130025c3c7aeb7e1ea2e02f36ee3b96c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f7bf77d75850f01ea3e9d8ee0ca3e949c3f95d621437ec1a893cd1dc5c58916\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f7bf77d75850f01ea3e9d8ee0ca3e949c3f95d621437ec1a893cd1dc5c58916\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.317915 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a468f665-a84a-4bfc-9a2c-672ad3c3cba6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5c41479d5089b48a439d4ea96fb7d33c0512e1c8a5d0c2c845eeda0eec6e789\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-18T00:34:39Z\\\",\\\"message\\\":\\\"le observer\\\\nW0218 00:34:38.835723 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0218 00:34:38.835899 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0218 00:34:38.836720 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2857718687/tls.crt::/tmp/serving-cert-2857718687/tls.key\\\\\\\"\\\\nI0218 00:34:39.149864 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0218 00:34:39.160059 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0218 00:34:39.160084 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0218 00:34:39.160111 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0218 00:34:39.160120 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0218 00:34:39.173782 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0218 00:34:39.173808 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173813 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0218 00:34:39.173817 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0218 00:34:39.173820 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0218 00:34:39.173822 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0218 00:34:39.173825 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0218 00:34:39.173989 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0218 00:34:39.174552 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:33Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.327867 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.327915 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.327932 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.327954 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.327969 4791 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:49Z","lastTransitionTime":"2026-02-18T00:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.333046 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4aff489-30bb-4177-9e0c-74b64442ec99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://303b30932350aee195f7d3016b3f77d80651962d7ac3fcdf3f9bc6c3068edd05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bbe6dc21c39d11a0afbc565e2a72fb8086857266a7c6bee6ca63446e11dcfbe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c929513a66d49109c0af20776d6f39d9a7c553d286245327cc61c7fd1d97bbb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:19Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.348482 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vnz85" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb393ddf-cece-42f2-8d94-c88a3d536802\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9485cfe0fe97814fa9ae31afb489a0e7eeea80a1ffb3501d473863a81ea8ed44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7ac765c60ad84ab02e9baf4c34874fafac89502d8ce00a2ab435ff555b889df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd12fe7b78495c4200f69b875191cd2bac1383e1552369c194cec2ecba011309\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7704a384c099b15db76fb7afc7f209707a829003c01a152ebe5a6e841a5774\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b931b62d82373d39354aa774600e3e181b3140dfa5f6bd623cc793c8b07a4a41\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a662da67a9353a00c5f8f090b6eed29360ec083168e82fb39ed1fcb9d54c7f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee83fee34543613544df669db7df4da1fa69ec8b1309bfb73f3c8a0b6d58a859\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwvrc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vnz85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.371563 4791 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc0c28173c82f93e7859b1e377355478f6d13cfa268519e0d647485149938264\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc0c28173c82f93e7859b1e377355478f6d13cfa268519e0d647485149938264\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-02-18T00:35:40Z\\\",\\\"message\\\":\\\"ult: []services.lbConfig{services.lbConfig{vips:[]string{\\\\\\\"10.217.5.93\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:5000, clusterEndpoints:services.lbEndpoints{Port:0, V4IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}}\\\\nI0218 00:35:40.142040 6856 lb_config.go:1031] Cluster endpoints for openshift-kube-apiserver-operator/metrics for network=default are: map[]\\\\nF0218 00:35:40.142043 6856 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:40Z is after 2025-08-24T17:21:41Z]\\\\nI0218 00:35:40.142048 6856 services_controller.go:443\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-18T00:35:39Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-tr5hg_openshift-ovn-kubernetes(3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-18T00:34:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-18T00:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-18T00:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xql4c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-18T00:34:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tr5hg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-02-18T00:35:49Z is after 2025-08-24T17:21:41Z" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.430979 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.431027 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.431036 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.431052 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.431061 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:49Z","lastTransitionTime":"2026-02-18T00:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.533624 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.533655 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.533663 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.533676 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.533686 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:49Z","lastTransitionTime":"2026-02-18T00:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.635855 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.635898 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.635907 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.635922 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.635934 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:49Z","lastTransitionTime":"2026-02-18T00:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.738339 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.738377 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.738387 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.738401 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.738410 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:49Z","lastTransitionTime":"2026-02-18T00:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.840423 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.840487 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.840505 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.840536 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.840562 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:49Z","lastTransitionTime":"2026-02-18T00:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.943244 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.943293 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.943305 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.943322 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:49 crc kubenswrapper[4791]: I0218 00:35:49.943333 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:49Z","lastTransitionTime":"2026-02-18T00:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.045712 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.045813 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.045832 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.045857 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.045873 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:50Z","lastTransitionTime":"2026-02-18T00:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.060130 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:50 crc kubenswrapper[4791]: E0218 00:35:50.060356 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.120947 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-15 10:01:23.080605131 +0000 UTC Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.147990 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.148055 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.148073 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.148097 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.148113 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:50Z","lastTransitionTime":"2026-02-18T00:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.256801 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.256883 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.256908 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.256937 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.256954 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:50Z","lastTransitionTime":"2026-02-18T00:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.360150 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.360263 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.360289 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.360322 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.360342 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:50Z","lastTransitionTime":"2026-02-18T00:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.462680 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.462990 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.463374 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.463558 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.463801 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:50Z","lastTransitionTime":"2026-02-18T00:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.566459 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.566530 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.566554 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.566582 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.566604 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:50Z","lastTransitionTime":"2026-02-18T00:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.669566 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.669622 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.669638 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.669660 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.669677 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:50Z","lastTransitionTime":"2026-02-18T00:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.772869 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.772948 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.772972 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.773009 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.773034 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:50Z","lastTransitionTime":"2026-02-18T00:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.876051 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.876125 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.876148 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.876209 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.876231 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:50Z","lastTransitionTime":"2026-02-18T00:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.979443 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.979492 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.979508 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.979530 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:50 crc kubenswrapper[4791]: I0218 00:35:50.979548 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:50Z","lastTransitionTime":"2026-02-18T00:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.060658 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.060690 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.060668 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:35:51 crc kubenswrapper[4791]: E0218 00:35:51.060784 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:35:51 crc kubenswrapper[4791]: E0218 00:35:51.060942 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:35:51 crc kubenswrapper[4791]: E0218 00:35:51.061128 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.081566 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.081626 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.081638 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.081656 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.081668 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:51Z","lastTransitionTime":"2026-02-18T00:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.121696 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-17 16:55:31.594056412 +0000 UTC Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.184307 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.184345 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.184357 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.184373 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.184385 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:51Z","lastTransitionTime":"2026-02-18T00:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.285971 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.286008 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.286017 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.286030 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.286045 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:51Z","lastTransitionTime":"2026-02-18T00:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.389001 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.389047 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.389058 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.389073 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.389084 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:51Z","lastTransitionTime":"2026-02-18T00:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.492410 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.492496 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.492522 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.492552 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.492574 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:51Z","lastTransitionTime":"2026-02-18T00:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.595424 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.595482 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.595500 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.595524 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.595542 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:51Z","lastTransitionTime":"2026-02-18T00:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.697689 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.697734 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.697745 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.697761 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.697774 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:51Z","lastTransitionTime":"2026-02-18T00:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.799667 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.799697 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.799714 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.799728 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.799738 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:51Z","lastTransitionTime":"2026-02-18T00:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.901367 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.901400 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.901410 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.901425 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:51 crc kubenswrapper[4791]: I0218 00:35:51.901436 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:51Z","lastTransitionTime":"2026-02-18T00:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.004931 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.004981 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.004998 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.005022 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.005038 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:52Z","lastTransitionTime":"2026-02-18T00:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.060139 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:52 crc kubenswrapper[4791]: E0218 00:35:52.060325 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.107798 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.107831 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.107843 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.107865 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.107881 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:52Z","lastTransitionTime":"2026-02-18T00:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.122409 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 17:24:57.783164969 +0000 UTC Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.210917 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.210951 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.210962 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.210976 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.210987 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:52Z","lastTransitionTime":"2026-02-18T00:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.313771 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.313839 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.313857 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.313880 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.313899 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:52Z","lastTransitionTime":"2026-02-18T00:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.416284 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.416334 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.416348 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.416367 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.416381 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:52Z","lastTransitionTime":"2026-02-18T00:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.518981 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.519036 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.519048 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.519065 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.519076 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:52Z","lastTransitionTime":"2026-02-18T00:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.621331 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.621417 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.621441 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.621483 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.621502 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:52Z","lastTransitionTime":"2026-02-18T00:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.725189 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.725247 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.725265 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.725292 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.725309 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:52Z","lastTransitionTime":"2026-02-18T00:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.827798 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.827858 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.827875 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.827903 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.827922 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:52Z","lastTransitionTime":"2026-02-18T00:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.931616 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.931715 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.931735 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.931760 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:52 crc kubenswrapper[4791]: I0218 00:35:52.931780 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:52Z","lastTransitionTime":"2026-02-18T00:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.034742 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.034801 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.034820 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.034843 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.034861 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:53Z","lastTransitionTime":"2026-02-18T00:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.060703 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.060730 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.060913 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:35:53 crc kubenswrapper[4791]: E0218 00:35:53.061110 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:35:53 crc kubenswrapper[4791]: E0218 00:35:53.061593 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:35:53 crc kubenswrapper[4791]: E0218 00:35:53.061362 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.123833 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-22 06:25:41.864563686 +0000 UTC Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.138041 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.138078 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.138089 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.138109 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.138121 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:53Z","lastTransitionTime":"2026-02-18T00:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.241264 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.241310 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.241323 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.241343 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.241357 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:53Z","lastTransitionTime":"2026-02-18T00:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.344056 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.344105 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.344116 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.344139 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.344177 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:53Z","lastTransitionTime":"2026-02-18T00:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.446569 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.446634 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.446657 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.446687 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.446708 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:53Z","lastTransitionTime":"2026-02-18T00:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.549528 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.549587 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.549603 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.549623 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.549639 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:53Z","lastTransitionTime":"2026-02-18T00:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.652963 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.653031 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.653042 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.653067 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.653083 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:53Z","lastTransitionTime":"2026-02-18T00:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.755563 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.755595 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.755604 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.755618 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.755625 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:53Z","lastTransitionTime":"2026-02-18T00:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.858291 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.858345 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.858355 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.858369 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.858378 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:53Z","lastTransitionTime":"2026-02-18T00:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.961544 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.961584 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.961592 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.961606 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:53 crc kubenswrapper[4791]: I0218 00:35:53.961614 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:53Z","lastTransitionTime":"2026-02-18T00:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.060767 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:54 crc kubenswrapper[4791]: E0218 00:35:54.061308 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.063606 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.063638 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.063651 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.063665 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.063675 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:54Z","lastTransitionTime":"2026-02-18T00:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.124751 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-10 10:59:07.880499622 +0000 UTC Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.165884 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.165950 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.165968 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.165990 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.166007 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:54Z","lastTransitionTime":"2026-02-18T00:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.268105 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.268236 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.268266 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.268303 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.268336 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:54Z","lastTransitionTime":"2026-02-18T00:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.370959 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.371009 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.371018 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.371031 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.371040 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:54Z","lastTransitionTime":"2026-02-18T00:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.472919 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.473020 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.473038 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.473061 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.473078 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:54Z","lastTransitionTime":"2026-02-18T00:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.575368 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.575407 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.575417 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.575433 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.575444 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:54Z","lastTransitionTime":"2026-02-18T00:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.678351 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.678679 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.678794 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.678930 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.679059 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:54Z","lastTransitionTime":"2026-02-18T00:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.781723 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.781785 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.781802 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.781823 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.781840 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:54Z","lastTransitionTime":"2026-02-18T00:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.884984 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.885195 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.885287 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.885357 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.885458 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:54Z","lastTransitionTime":"2026-02-18T00:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.987952 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.987994 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.988003 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.988016 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:54 crc kubenswrapper[4791]: I0218 00:35:54.988026 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:54Z","lastTransitionTime":"2026-02-18T00:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.060426 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.060464 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.060555 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:35:55 crc kubenswrapper[4791]: E0218 00:35:55.060705 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:35:55 crc kubenswrapper[4791]: E0218 00:35:55.060884 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:35:55 crc kubenswrapper[4791]: E0218 00:35:55.061123 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.090461 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.090551 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.090575 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.090608 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.090631 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:55Z","lastTransitionTime":"2026-02-18T00:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.124985 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-29 05:24:22.605541196 +0000 UTC Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.193544 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.193571 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.193581 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.193594 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.193603 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:55Z","lastTransitionTime":"2026-02-18T00:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.297034 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.297103 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.297121 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.297148 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.297212 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:55Z","lastTransitionTime":"2026-02-18T00:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.400454 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.400661 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.400688 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.400842 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.400932 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:55Z","lastTransitionTime":"2026-02-18T00:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.503465 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.503734 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.503890 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.504068 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.504194 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:55Z","lastTransitionTime":"2026-02-18T00:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.606649 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.606928 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.606997 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.607060 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.607118 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:55Z","lastTransitionTime":"2026-02-18T00:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.709922 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.709962 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.709976 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.709995 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.710007 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:55Z","lastTransitionTime":"2026-02-18T00:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.813111 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.813152 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.813179 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.813205 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.813215 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:55Z","lastTransitionTime":"2026-02-18T00:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.916290 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.916418 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.916437 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.916460 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:55 crc kubenswrapper[4791]: I0218 00:35:55.916478 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:55Z","lastTransitionTime":"2026-02-18T00:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.019398 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.019450 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.019466 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.019489 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.019505 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:56Z","lastTransitionTime":"2026-02-18T00:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.060300 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:56 crc kubenswrapper[4791]: E0218 00:35:56.060469 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.121740 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.121781 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.121793 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.121810 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.121822 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:56Z","lastTransitionTime":"2026-02-18T00:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.127107 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-25 02:40:54.523089533 +0000 UTC Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.226410 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.226507 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.226536 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.226569 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.226592 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:56Z","lastTransitionTime":"2026-02-18T00:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.329448 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.329542 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.329562 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.329594 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.329616 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:56Z","lastTransitionTime":"2026-02-18T00:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.433244 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.433316 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.433339 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.433416 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.433477 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:56Z","lastTransitionTime":"2026-02-18T00:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.535904 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.535944 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.535956 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.535974 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.535984 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:56Z","lastTransitionTime":"2026-02-18T00:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.638334 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.638367 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.638378 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.638396 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.638407 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:56Z","lastTransitionTime":"2026-02-18T00:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.741748 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.741822 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.741843 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.741870 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.741908 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:56Z","lastTransitionTime":"2026-02-18T00:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.845943 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.846025 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.846052 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.846092 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.846119 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:56Z","lastTransitionTime":"2026-02-18T00:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.949057 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.949127 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.949148 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.949207 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:56 crc kubenswrapper[4791]: I0218 00:35:56.949230 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:56Z","lastTransitionTime":"2026-02-18T00:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.051968 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.052014 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.052025 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.052042 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.052055 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:57Z","lastTransitionTime":"2026-02-18T00:35:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.060677 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.060750 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:35:57 crc kubenswrapper[4791]: E0218 00:35:57.060975 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.061268 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:35:57 crc kubenswrapper[4791]: E0218 00:35:57.061386 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:35:57 crc kubenswrapper[4791]: E0218 00:35:57.061555 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.062610 4791 scope.go:117] "RemoveContainer" containerID="cc0c28173c82f93e7859b1e377355478f6d13cfa268519e0d647485149938264" Feb 18 00:35:57 crc kubenswrapper[4791]: E0218 00:35:57.062880 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-tr5hg_openshift-ovn-kubernetes(3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1)\"" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.127333 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-09 22:52:08.316152991 +0000 UTC Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.156946 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.156995 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.157009 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.157027 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.157039 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:57Z","lastTransitionTime":"2026-02-18T00:35:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.259271 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.259315 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.259328 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.259344 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.259356 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:57Z","lastTransitionTime":"2026-02-18T00:35:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.355686 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.355724 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.355735 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.355751 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.355763 4791 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-18T00:35:57Z","lastTransitionTime":"2026-02-18T00:35:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.414621 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-7hxcw"] Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.415196 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7hxcw" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.417004 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.417367 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.418331 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.420460 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.427108 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/b8217f7f-2c69-431e-9d8b-f72e4eb2704a-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-7hxcw\" (UID: \"b8217f7f-2c69-431e-9d8b-f72e4eb2704a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7hxcw" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.427512 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b8217f7f-2c69-431e-9d8b-f72e4eb2704a-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-7hxcw\" (UID: \"b8217f7f-2c69-431e-9d8b-f72e4eb2704a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7hxcw" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.427539 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: 
\"kubernetes.io/configmap/b8217f7f-2c69-431e-9d8b-f72e4eb2704a-service-ca\") pod \"cluster-version-operator-5c965bbfc6-7hxcw\" (UID: \"b8217f7f-2c69-431e-9d8b-f72e4eb2704a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7hxcw" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.427589 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/b8217f7f-2c69-431e-9d8b-f72e4eb2704a-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-7hxcw\" (UID: \"b8217f7f-2c69-431e-9d8b-f72e4eb2704a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7hxcw" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.427650 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b8217f7f-2c69-431e-9d8b-f72e4eb2704a-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-7hxcw\" (UID: \"b8217f7f-2c69-431e-9d8b-f72e4eb2704a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7hxcw" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.489330 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=49.489308031 podStartE2EDuration="49.489308031s" podCreationTimestamp="2026-02-18 00:35:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:35:57.476487822 +0000 UTC m=+99.044501022" watchObservedRunningTime="2026-02-18 00:35:57.489308031 +0000 UTC m=+99.057321211" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.519561 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podStartSLOduration=72.519539243 podStartE2EDuration="1m12.519539243s" podCreationTimestamp="2026-02-18 00:34:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:35:57.507336394 +0000 UTC m=+99.075349574" watchObservedRunningTime="2026-02-18 00:35:57.519539243 +0000 UTC m=+99.087552423" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.528399 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b8217f7f-2c69-431e-9d8b-f72e4eb2704a-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-7hxcw\" (UID: \"b8217f7f-2c69-431e-9d8b-f72e4eb2704a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7hxcw" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.528472 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b8217f7f-2c69-431e-9d8b-f72e4eb2704a-service-ca\") pod \"cluster-version-operator-5c965bbfc6-7hxcw\" (UID: \"b8217f7f-2c69-431e-9d8b-f72e4eb2704a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7hxcw" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.528502 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/b8217f7f-2c69-431e-9d8b-f72e4eb2704a-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-7hxcw\" (UID: \"b8217f7f-2c69-431e-9d8b-f72e4eb2704a\") " 
pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7hxcw" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.528587 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b8217f7f-2c69-431e-9d8b-f72e4eb2704a-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-7hxcw\" (UID: \"b8217f7f-2c69-431e-9d8b-f72e4eb2704a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7hxcw" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.528686 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/b8217f7f-2c69-431e-9d8b-f72e4eb2704a-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-7hxcw\" (UID: \"b8217f7f-2c69-431e-9d8b-f72e4eb2704a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7hxcw" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.528745 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/b8217f7f-2c69-431e-9d8b-f72e4eb2704a-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-7hxcw\" (UID: \"b8217f7f-2c69-431e-9d8b-f72e4eb2704a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7hxcw" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.528797 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/b8217f7f-2c69-431e-9d8b-f72e4eb2704a-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-7hxcw\" (UID: \"b8217f7f-2c69-431e-9d8b-f72e4eb2704a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7hxcw" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.529576 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b8217f7f-2c69-431e-9d8b-f72e4eb2704a-service-ca\") pod \"cluster-version-operator-5c965bbfc6-7hxcw\" (UID: \"b8217f7f-2c69-431e-9d8b-f72e4eb2704a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7hxcw" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.533839 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b8217f7f-2c69-431e-9d8b-f72e4eb2704a-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-7hxcw\" (UID: \"b8217f7f-2c69-431e-9d8b-f72e4eb2704a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7hxcw" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.545194 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=18.545132282 podStartE2EDuration="18.545132282s" podCreationTimestamp="2026-02-18 00:35:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:35:57.529970709 +0000 UTC m=+99.097983879" watchObservedRunningTime="2026-02-18 00:35:57.545132282 +0000 UTC m=+99.113145452" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.545459 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=78.545452462 podStartE2EDuration="1m18.545452462s" podCreationTimestamp="2026-02-18 00:34:39 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:35:57.544896864 +0000 UTC m=+99.112910034" watchObservedRunningTime="2026-02-18 00:35:57.545452462 +0000 UTC m=+99.113465632" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.551674 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b8217f7f-2c69-431e-9d8b-f72e4eb2704a-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-7hxcw\" (UID: \"b8217f7f-2c69-431e-9d8b-f72e4eb2704a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7hxcw" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.572150 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=73.572133294 podStartE2EDuration="1m13.572133294s" podCreationTimestamp="2026-02-18 00:34:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:35:57.557952622 +0000 UTC m=+99.125965792" watchObservedRunningTime="2026-02-18 00:35:57.572133294 +0000 UTC m=+99.140146464" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.572764 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-vnz85" podStartSLOduration=72.572760173 podStartE2EDuration="1m12.572760173s" podCreationTimestamp="2026-02-18 00:34:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:35:57.571518315 +0000 UTC m=+99.139531505" watchObservedRunningTime="2026-02-18 00:35:57.572760173 +0000 UTC m=+99.140773343" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.617564 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-d2kpn" podStartSLOduration=72.617535789 podStartE2EDuration="1m12.617535789s" podCreationTimestamp="2026-02-18 00:34:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:35:57.616832848 +0000 UTC m=+99.184846018" watchObservedRunningTime="2026-02-18 00:35:57.617535789 +0000 UTC m=+99.185548959" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.626441 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-cg5l2" podStartSLOduration=72.626427277 podStartE2EDuration="1m12.626427277s" podCreationTimestamp="2026-02-18 00:34:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:35:57.626088376 +0000 UTC m=+99.194101546" watchObservedRunningTime="2026-02-18 00:35:57.626427277 +0000 UTC m=+99.194440447" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.633712 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-cq6jj" podStartSLOduration=72.633701474 podStartE2EDuration="1m12.633701474s" podCreationTimestamp="2026-02-18 00:34:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:35:57.633637662 +0000 UTC m=+99.201650832" watchObservedRunningTime="2026-02-18 00:35:57.633701474 +0000 UTC 
m=+99.201714644" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.671004 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=76.670983346 podStartE2EDuration="1m16.670983346s" podCreationTimestamp="2026-02-18 00:34:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:35:57.670600004 +0000 UTC m=+99.238613184" watchObservedRunningTime="2026-02-18 00:35:57.670983346 +0000 UTC m=+99.238996516" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.671604 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zq7r" podStartSLOduration=71.671597525 podStartE2EDuration="1m11.671597525s" podCreationTimestamp="2026-02-18 00:34:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:35:57.647206815 +0000 UTC m=+99.215219995" watchObservedRunningTime="2026-02-18 00:35:57.671597525 +0000 UTC m=+99.239610695" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.732008 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7hxcw" Feb 18 00:35:57 crc kubenswrapper[4791]: I0218 00:35:57.839726 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7hxcw" event={"ID":"b8217f7f-2c69-431e-9d8b-f72e4eb2704a","Type":"ContainerStarted","Data":"1b165a2a1b17a2f3371a2dbbed8bbf0a4991f4ba9ddb7dee0eaa81c964548933"} Feb 18 00:35:58 crc kubenswrapper[4791]: I0218 00:35:58.061068 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:35:58 crc kubenswrapper[4791]: E0218 00:35:58.061372 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:35:58 crc kubenswrapper[4791]: I0218 00:35:58.127924 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-13 10:56:14.088647468 +0000 UTC Feb 18 00:35:58 crc kubenswrapper[4791]: I0218 00:35:58.128051 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Rotating certificates Feb 18 00:35:58 crc kubenswrapper[4791]: I0218 00:35:58.140223 4791 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Feb 18 00:35:58 crc kubenswrapper[4791]: I0218 00:35:58.845379 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7hxcw" event={"ID":"b8217f7f-2c69-431e-9d8b-f72e4eb2704a","Type":"ContainerStarted","Data":"bb7191494ef4f0bfd322f19bcd3d287b0a9f92b90d8e72f8b916a7b1bf825504"} Feb 18 00:35:58 crc kubenswrapper[4791]: I0218 00:35:58.862740 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7hxcw" podStartSLOduration=73.862700337 podStartE2EDuration="1m13.862700337s" podCreationTimestamp="2026-02-18 00:34:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:35:58.862248842 +0000 UTC m=+100.430262042" watchObservedRunningTime="2026-02-18 00:35:58.862700337 +0000 UTC m=+100.430713527" Feb 18 00:35:59 crc kubenswrapper[4791]: I0218 00:35:59.060300 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:35:59 crc kubenswrapper[4791]: I0218 00:35:59.060311 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:35:59 crc kubenswrapper[4791]: I0218 00:35:59.062359 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:35:59 crc kubenswrapper[4791]: E0218 00:35:59.062881 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:35:59 crc kubenswrapper[4791]: E0218 00:35:59.062882 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:35:59 crc kubenswrapper[4791]: E0218 00:35:59.062889 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:36:00 crc kubenswrapper[4791]: I0218 00:36:00.060642 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:36:00 crc kubenswrapper[4791]: E0218 00:36:00.060759 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:36:01 crc kubenswrapper[4791]: I0218 00:36:01.060917 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:36:01 crc kubenswrapper[4791]: I0218 00:36:01.060939 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:36:01 crc kubenswrapper[4791]: E0218 00:36:01.061075 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:36:01 crc kubenswrapper[4791]: E0218 00:36:01.061153 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:36:01 crc kubenswrapper[4791]: I0218 00:36:01.060940 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:36:01 crc kubenswrapper[4791]: E0218 00:36:01.061259 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:36:02 crc kubenswrapper[4791]: I0218 00:36:02.060900 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:36:02 crc kubenswrapper[4791]: E0218 00:36:02.061022 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:36:03 crc kubenswrapper[4791]: I0218 00:36:03.060128 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:36:03 crc kubenswrapper[4791]: I0218 00:36:03.060220 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:36:03 crc kubenswrapper[4791]: I0218 00:36:03.060127 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:36:03 crc kubenswrapper[4791]: E0218 00:36:03.060239 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:36:03 crc kubenswrapper[4791]: E0218 00:36:03.060345 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:36:03 crc kubenswrapper[4791]: E0218 00:36:03.060424 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:36:04 crc kubenswrapper[4791]: I0218 00:36:04.060509 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:36:04 crc kubenswrapper[4791]: E0218 00:36:04.060725 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:36:04 crc kubenswrapper[4791]: I0218 00:36:04.408025 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/afcf9ee1-4224-441c-a98d-9330bed34065-metrics-certs\") pod \"network-metrics-daemon-jq75l\" (UID: \"afcf9ee1-4224-441c-a98d-9330bed34065\") " pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:36:04 crc kubenswrapper[4791]: E0218 00:36:04.408228 4791 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Feb 18 00:36:04 crc kubenswrapper[4791]: E0218 00:36:04.408305 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/afcf9ee1-4224-441c-a98d-9330bed34065-metrics-certs podName:afcf9ee1-4224-441c-a98d-9330bed34065 nodeName:}" failed. No retries permitted until 2026-02-18 00:37:08.40828346 +0000 UTC m=+169.976296710 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/afcf9ee1-4224-441c-a98d-9330bed34065-metrics-certs") pod "network-metrics-daemon-jq75l" (UID: "afcf9ee1-4224-441c-a98d-9330bed34065") : object "openshift-multus"/"metrics-daemon-secret" not registered Feb 18 00:36:05 crc kubenswrapper[4791]: I0218 00:36:05.060737 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:36:05 crc kubenswrapper[4791]: I0218 00:36:05.060761 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:36:05 crc kubenswrapper[4791]: E0218 00:36:05.061318 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:36:05 crc kubenswrapper[4791]: I0218 00:36:05.060825 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:36:05 crc kubenswrapper[4791]: E0218 00:36:05.061474 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:36:05 crc kubenswrapper[4791]: E0218 00:36:05.061711 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:36:06 crc kubenswrapper[4791]: I0218 00:36:06.061154 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:36:06 crc kubenswrapper[4791]: E0218 00:36:06.061412 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:36:07 crc kubenswrapper[4791]: I0218 00:36:07.061006 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:36:07 crc kubenswrapper[4791]: I0218 00:36:07.061287 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:36:07 crc kubenswrapper[4791]: I0218 00:36:07.061482 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:36:07 crc kubenswrapper[4791]: E0218 00:36:07.061879 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:36:07 crc kubenswrapper[4791]: E0218 00:36:07.062086 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:36:07 crc kubenswrapper[4791]: E0218 00:36:07.062238 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:36:08 crc kubenswrapper[4791]: I0218 00:36:08.060778 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:36:08 crc kubenswrapper[4791]: E0218 00:36:08.060909 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:36:09 crc kubenswrapper[4791]: I0218 00:36:09.060456 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:36:09 crc kubenswrapper[4791]: I0218 00:36:09.060522 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:36:09 crc kubenswrapper[4791]: E0218 00:36:09.062536 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:36:09 crc kubenswrapper[4791]: I0218 00:36:09.062611 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:36:09 crc kubenswrapper[4791]: E0218 00:36:09.063003 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:36:09 crc kubenswrapper[4791]: E0218 00:36:09.063036 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:36:10 crc kubenswrapper[4791]: I0218 00:36:10.061120 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:36:10 crc kubenswrapper[4791]: E0218 00:36:10.061433 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:36:10 crc kubenswrapper[4791]: I0218 00:36:10.061704 4791 scope.go:117] "RemoveContainer" containerID="cc0c28173c82f93e7859b1e377355478f6d13cfa268519e0d647485149938264" Feb 18 00:36:10 crc kubenswrapper[4791]: E0218 00:36:10.061854 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-tr5hg_openshift-ovn-kubernetes(3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1)\"" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" Feb 18 00:36:11 crc kubenswrapper[4791]: I0218 00:36:11.060992 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:36:11 crc kubenswrapper[4791]: I0218 00:36:11.061060 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:36:11 crc kubenswrapper[4791]: I0218 00:36:11.061103 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:36:11 crc kubenswrapper[4791]: E0218 00:36:11.061200 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:36:11 crc kubenswrapper[4791]: E0218 00:36:11.061328 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:36:11 crc kubenswrapper[4791]: E0218 00:36:11.061565 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:36:12 crc kubenswrapper[4791]: I0218 00:36:12.061335 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:36:12 crc kubenswrapper[4791]: E0218 00:36:12.061583 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:36:13 crc kubenswrapper[4791]: I0218 00:36:13.061113 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:36:13 crc kubenswrapper[4791]: I0218 00:36:13.061237 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:36:13 crc kubenswrapper[4791]: I0218 00:36:13.061117 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:36:13 crc kubenswrapper[4791]: E0218 00:36:13.061414 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:36:13 crc kubenswrapper[4791]: E0218 00:36:13.061515 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:36:13 crc kubenswrapper[4791]: E0218 00:36:13.061641 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:36:14 crc kubenswrapper[4791]: I0218 00:36:14.060827 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:36:14 crc kubenswrapper[4791]: E0218 00:36:14.061003 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:36:15 crc kubenswrapper[4791]: I0218 00:36:15.060722 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:36:15 crc kubenswrapper[4791]: I0218 00:36:15.060765 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:36:15 crc kubenswrapper[4791]: I0218 00:36:15.060935 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:36:15 crc kubenswrapper[4791]: E0218 00:36:15.060930 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:36:15 crc kubenswrapper[4791]: E0218 00:36:15.061050 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:36:15 crc kubenswrapper[4791]: E0218 00:36:15.061127 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:36:16 crc kubenswrapper[4791]: I0218 00:36:16.060312 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:36:16 crc kubenswrapper[4791]: E0218 00:36:16.060584 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:36:17 crc kubenswrapper[4791]: I0218 00:36:17.060239 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:36:17 crc kubenswrapper[4791]: I0218 00:36:17.060278 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:36:17 crc kubenswrapper[4791]: E0218 00:36:17.060353 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:36:17 crc kubenswrapper[4791]: I0218 00:36:17.060370 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:36:17 crc kubenswrapper[4791]: E0218 00:36:17.060448 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:36:17 crc kubenswrapper[4791]: E0218 00:36:17.060521 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:36:18 crc kubenswrapper[4791]: I0218 00:36:18.060191 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:36:18 crc kubenswrapper[4791]: E0218 00:36:18.060297 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:36:19 crc kubenswrapper[4791]: I0218 00:36:19.060248 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:36:19 crc kubenswrapper[4791]: I0218 00:36:19.060244 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:36:19 crc kubenswrapper[4791]: I0218 00:36:19.060325 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:36:19 crc kubenswrapper[4791]: E0218 00:36:19.061251 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:36:19 crc kubenswrapper[4791]: E0218 00:36:19.061330 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:36:19 crc kubenswrapper[4791]: E0218 00:36:19.061414 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:36:19 crc kubenswrapper[4791]: E0218 00:36:19.081718 4791 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Feb 18 00:36:19 crc kubenswrapper[4791]: E0218 00:36:19.167010 4791 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 18 00:36:19 crc kubenswrapper[4791]: I0218 00:36:19.910639 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-d2kpn_83bdb769-59eb-4472-ba08-be5897ee2cd6/kube-multus/1.log" Feb 18 00:36:19 crc kubenswrapper[4791]: I0218 00:36:19.911844 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-d2kpn_83bdb769-59eb-4472-ba08-be5897ee2cd6/kube-multus/0.log" Feb 18 00:36:19 crc kubenswrapper[4791]: I0218 00:36:19.911889 4791 generic.go:334] "Generic (PLEG): container finished" podID="83bdb769-59eb-4472-ba08-be5897ee2cd6" containerID="63aa39cbb146d0f470c77f1b8ae9dd29616913e05a79de889b8e684b40621099" exitCode=1 Feb 18 00:36:19 crc kubenswrapper[4791]: I0218 00:36:19.911935 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-d2kpn" event={"ID":"83bdb769-59eb-4472-ba08-be5897ee2cd6","Type":"ContainerDied","Data":"63aa39cbb146d0f470c77f1b8ae9dd29616913e05a79de889b8e684b40621099"} Feb 18 00:36:19 crc kubenswrapper[4791]: I0218 00:36:19.912032 4791 scope.go:117] "RemoveContainer" containerID="773a81d0ced2fa6465ee7994e3bb1ec35f8f2b57adee47a5c2d0b6283dd33e26" Feb 18 00:36:19 crc kubenswrapper[4791]: I0218 00:36:19.912742 4791 scope.go:117] "RemoveContainer" containerID="63aa39cbb146d0f470c77f1b8ae9dd29616913e05a79de889b8e684b40621099" Feb 18 00:36:19 crc kubenswrapper[4791]: E0218 00:36:19.913093 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-d2kpn_openshift-multus(83bdb769-59eb-4472-ba08-be5897ee2cd6)\"" pod="openshift-multus/multus-d2kpn" podUID="83bdb769-59eb-4472-ba08-be5897ee2cd6" Feb 18 00:36:20 crc kubenswrapper[4791]: I0218 00:36:20.060417 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:36:20 crc kubenswrapper[4791]: E0218 00:36:20.061277 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:36:20 crc kubenswrapper[4791]: I0218 00:36:20.915903 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-d2kpn_83bdb769-59eb-4472-ba08-be5897ee2cd6/kube-multus/1.log" Feb 18 00:36:21 crc kubenswrapper[4791]: I0218 00:36:21.060573 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:36:21 crc kubenswrapper[4791]: I0218 00:36:21.060656 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:36:21 crc kubenswrapper[4791]: E0218 00:36:21.060710 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:36:21 crc kubenswrapper[4791]: E0218 00:36:21.060954 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:36:21 crc kubenswrapper[4791]: I0218 00:36:21.061100 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:36:21 crc kubenswrapper[4791]: E0218 00:36:21.061260 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:36:22 crc kubenswrapper[4791]: I0218 00:36:22.060584 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:36:22 crc kubenswrapper[4791]: E0218 00:36:22.060776 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:36:23 crc kubenswrapper[4791]: I0218 00:36:23.060571 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:36:23 crc kubenswrapper[4791]: I0218 00:36:23.060687 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:36:23 crc kubenswrapper[4791]: I0218 00:36:23.060580 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:36:23 crc kubenswrapper[4791]: E0218 00:36:23.060747 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:36:23 crc kubenswrapper[4791]: E0218 00:36:23.060680 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:36:23 crc kubenswrapper[4791]: E0218 00:36:23.060909 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:36:24 crc kubenswrapper[4791]: I0218 00:36:24.060706 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:36:24 crc kubenswrapper[4791]: E0218 00:36:24.060933 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:36:24 crc kubenswrapper[4791]: E0218 00:36:24.169089 4791 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 18 00:36:25 crc kubenswrapper[4791]: I0218 00:36:25.061540 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:36:25 crc kubenswrapper[4791]: I0218 00:36:25.061605 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:36:25 crc kubenswrapper[4791]: I0218 00:36:25.061548 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:36:25 crc kubenswrapper[4791]: E0218 00:36:25.061742 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:36:25 crc kubenswrapper[4791]: E0218 00:36:25.062046 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:36:25 crc kubenswrapper[4791]: I0218 00:36:25.062113 4791 scope.go:117] "RemoveContainer" containerID="cc0c28173c82f93e7859b1e377355478f6d13cfa268519e0d647485149938264" Feb 18 00:36:25 crc kubenswrapper[4791]: E0218 00:36:25.062336 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:36:25 crc kubenswrapper[4791]: I0218 00:36:25.883198 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-jq75l"] Feb 18 00:36:25 crc kubenswrapper[4791]: I0218 00:36:25.883339 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:36:25 crc kubenswrapper[4791]: E0218 00:36:25.883460 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:36:25 crc kubenswrapper[4791]: I0218 00:36:25.935077 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tr5hg_3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1/ovnkube-controller/3.log" Feb 18 00:36:25 crc kubenswrapper[4791]: I0218 00:36:25.938007 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" event={"ID":"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1","Type":"ContainerStarted","Data":"30985d2c2ef7798a054a00074126dfc336988e91f8bb27449f3fb3b29c0c6380"} Feb 18 00:36:25 crc kubenswrapper[4791]: I0218 00:36:25.938412 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:36:25 crc kubenswrapper[4791]: I0218 00:36:25.964034 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" podStartSLOduration=100.964015552 podStartE2EDuration="1m40.964015552s" podCreationTimestamp="2026-02-18 00:34:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:25.963595718 +0000 UTC m=+127.531608878" watchObservedRunningTime="2026-02-18 00:36:25.964015552 +0000 UTC m=+127.532028722" Feb 18 00:36:27 crc kubenswrapper[4791]: I0218 00:36:27.060759 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:36:27 crc kubenswrapper[4791]: E0218 00:36:27.061096 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:36:27 crc kubenswrapper[4791]: I0218 00:36:27.060803 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:36:27 crc kubenswrapper[4791]: I0218 00:36:27.060759 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:36:27 crc kubenswrapper[4791]: E0218 00:36:27.061232 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:36:27 crc kubenswrapper[4791]: I0218 00:36:27.060929 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:36:27 crc kubenswrapper[4791]: E0218 00:36:27.061307 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:36:27 crc kubenswrapper[4791]: E0218 00:36:27.061436 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:36:29 crc kubenswrapper[4791]: I0218 00:36:29.060815 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:36:29 crc kubenswrapper[4791]: I0218 00:36:29.064191 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:36:29 crc kubenswrapper[4791]: I0218 00:36:29.064242 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:36:29 crc kubenswrapper[4791]: I0218 00:36:29.064283 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:36:29 crc kubenswrapper[4791]: E0218 00:36:29.064152 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:36:29 crc kubenswrapper[4791]: E0218 00:36:29.064428 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:36:29 crc kubenswrapper[4791]: E0218 00:36:29.064538 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:36:29 crc kubenswrapper[4791]: E0218 00:36:29.064655 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:36:29 crc kubenswrapper[4791]: E0218 00:36:29.169614 4791 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 18 00:36:31 crc kubenswrapper[4791]: I0218 00:36:31.060432 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:36:31 crc kubenswrapper[4791]: I0218 00:36:31.060492 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:36:31 crc kubenswrapper[4791]: I0218 00:36:31.060572 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:36:31 crc kubenswrapper[4791]: E0218 00:36:31.060694 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:36:31 crc kubenswrapper[4791]: I0218 00:36:31.060776 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:36:31 crc kubenswrapper[4791]: E0218 00:36:31.060874 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:36:31 crc kubenswrapper[4791]: E0218 00:36:31.061025 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:36:31 crc kubenswrapper[4791]: E0218 00:36:31.061126 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:36:33 crc kubenswrapper[4791]: I0218 00:36:33.060281 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:36:33 crc kubenswrapper[4791]: I0218 00:36:33.060566 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:36:33 crc kubenswrapper[4791]: I0218 00:36:33.060629 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:36:33 crc kubenswrapper[4791]: E0218 00:36:33.060676 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:36:33 crc kubenswrapper[4791]: I0218 00:36:33.060791 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:36:33 crc kubenswrapper[4791]: E0218 00:36:33.060885 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:36:33 crc kubenswrapper[4791]: E0218 00:36:33.061240 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:36:33 crc kubenswrapper[4791]: E0218 00:36:33.061241 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:36:34 crc kubenswrapper[4791]: I0218 00:36:34.061410 4791 scope.go:117] "RemoveContainer" containerID="63aa39cbb146d0f470c77f1b8ae9dd29616913e05a79de889b8e684b40621099" Feb 18 00:36:34 crc kubenswrapper[4791]: E0218 00:36:34.172059 4791 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 18 00:36:35 crc kubenswrapper[4791]: I0218 00:36:35.052867 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-d2kpn_83bdb769-59eb-4472-ba08-be5897ee2cd6/kube-multus/1.log" Feb 18 00:36:35 crc kubenswrapper[4791]: I0218 00:36:35.052955 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-d2kpn" event={"ID":"83bdb769-59eb-4472-ba08-be5897ee2cd6","Type":"ContainerStarted","Data":"fad5ddd370dd4141f0c92d01554dafc2561baba1cf4e086c3add33e0923068b0"} Feb 18 00:36:35 crc kubenswrapper[4791]: I0218 00:36:35.060518 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:36:35 crc kubenswrapper[4791]: I0218 00:36:35.060561 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:36:35 crc kubenswrapper[4791]: I0218 00:36:35.060673 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:36:35 crc kubenswrapper[4791]: I0218 00:36:35.060873 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:36:35 crc kubenswrapper[4791]: E0218 00:36:35.060894 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:36:35 crc kubenswrapper[4791]: E0218 00:36:35.060993 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:36:35 crc kubenswrapper[4791]: E0218 00:36:35.061117 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:36:35 crc kubenswrapper[4791]: E0218 00:36:35.061347 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:36:37 crc kubenswrapper[4791]: I0218 00:36:37.060128 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:36:37 crc kubenswrapper[4791]: I0218 00:36:37.060223 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:36:37 crc kubenswrapper[4791]: I0218 00:36:37.060219 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:36:37 crc kubenswrapper[4791]: I0218 00:36:37.060182 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:36:37 crc kubenswrapper[4791]: E0218 00:36:37.060403 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:36:37 crc kubenswrapper[4791]: E0218 00:36:37.060575 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:36:37 crc kubenswrapper[4791]: E0218 00:36:37.060721 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:36:37 crc kubenswrapper[4791]: E0218 00:36:37.060855 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:36:39 crc kubenswrapper[4791]: I0218 00:36:39.060923 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:36:39 crc kubenswrapper[4791]: I0218 00:36:39.060948 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:36:39 crc kubenswrapper[4791]: I0218 00:36:39.060952 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:36:39 crc kubenswrapper[4791]: E0218 00:36:39.062236 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 18 00:36:39 crc kubenswrapper[4791]: I0218 00:36:39.062264 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:36:39 crc kubenswrapper[4791]: E0218 00:36:39.062428 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 18 00:36:39 crc kubenswrapper[4791]: E0218 00:36:39.062635 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jq75l" podUID="afcf9ee1-4224-441c-a98d-9330bed34065" Feb 18 00:36:39 crc kubenswrapper[4791]: E0218 00:36:39.062715 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 18 00:36:41 crc kubenswrapper[4791]: I0218 00:36:41.060615 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:36:41 crc kubenswrapper[4791]: I0218 00:36:41.060675 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:36:41 crc kubenswrapper[4791]: I0218 00:36:41.061333 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:36:41 crc kubenswrapper[4791]: I0218 00:36:41.061348 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:36:41 crc kubenswrapper[4791]: I0218 00:36:41.064669 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Feb 18 00:36:41 crc kubenswrapper[4791]: I0218 00:36:41.065219 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Feb 18 00:36:41 crc kubenswrapper[4791]: I0218 00:36:41.065429 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Feb 18 00:36:41 crc kubenswrapper[4791]: I0218 00:36:41.065665 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Feb 18 00:36:41 crc kubenswrapper[4791]: I0218 00:36:41.066567 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Feb 18 00:36:41 crc kubenswrapper[4791]: I0218 00:36:41.066637 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Feb 18 00:36:47 crc kubenswrapper[4791]: I0218 00:36:47.098837 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:47 crc kubenswrapper[4791]: I0218 00:36:47.099002 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:36:47 crc kubenswrapper[4791]: I0218 00:36:47.099053 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:36:47 crc kubenswrapper[4791]: E0218 00:36:47.099196 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:38:49.099134024 +0000 UTC m=+270.667147224 (durationBeforeRetry 2m2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:47 crc kubenswrapper[4791]: I0218 00:36:47.099334 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:36:47 crc kubenswrapper[4791]: I0218 00:36:47.099384 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:36:47 crc kubenswrapper[4791]: I0218 00:36:47.101150 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:36:47 crc kubenswrapper[4791]: I0218 00:36:47.107824 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:36:47 crc kubenswrapper[4791]: I0218 00:36:47.108051 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:36:47 crc kubenswrapper[4791]: I0218 00:36:47.109006 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:36:47 crc kubenswrapper[4791]: I0218 00:36:47.112630 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 18 00:36:47 crc kubenswrapper[4791]: I0218 00:36:47.140672 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:36:47 crc kubenswrapper[4791]: I0218 00:36:47.393949 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 18 00:36:47 crc kubenswrapper[4791]: W0218 00:36:47.415039 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d751cbb_f2e2_430d_9754_c882a5e924a5.slice/crio-95308df0d462a0dae5810e0d6574adda730017ba34388f75991a9be2dcd8efca WatchSource:0}: Error finding container 95308df0d462a0dae5810e0d6574adda730017ba34388f75991a9be2dcd8efca: Status 404 returned error can't find the container with id 95308df0d462a0dae5810e0d6574adda730017ba34388f75991a9be2dcd8efca Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.103282 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"9ec8f5baa8724f2cd71970c68c512aa99c3f51f47c0f190100eb7c64640f9c16"} Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.103670 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"cddd6ab45bcc5ca4e5b13c87ccebea60822c64c8bff1f46c2de22172854e95f1"} Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.103889 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.121664 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"04d73f74d19c79e7e070fb7e81d06d7887f492a49da28e4152f435df87cd858d"} Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.121732 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"f5ebebed45e52cb15fe8f01c2cb51e791bb0c670a255b3b9ce22811f8aa70eb6"} Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.130483 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"6a7b9f135c5db05137315158495b0d89e3345c9f9fdc7e7772b56347c934e014"} Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.130598 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"95308df0d462a0dae5810e0d6574adda730017ba34388f75991a9be2dcd8efca"} Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.399235 4791 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.442673 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-xdf9g"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.443480 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.445353 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-rwmnp"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.445770 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-rwmnp" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.447123 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.447530 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.447822 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.448071 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.448426 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.448653 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.448887 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.450891 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-w8nfn"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.451636 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-w8nfn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.452151 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.453770 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.454002 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.456108 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.456406 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.456749 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.456970 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.457189 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.457419 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.460898 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-pruner-29522880-928kh"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.461258 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-r4t2n"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.461491 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-r4t2n" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.461858 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-pruner-29522880-928kh" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.463173 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.463294 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.464911 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.465025 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.465133 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.465787 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.465998 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.467968 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.468303 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"serviceca" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.468620 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.468876 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.469222 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-v7s8c"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.469652 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-v7s8c" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.470120 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-px7q8"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.470315 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.470436 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.470642 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-px7q8" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.471386 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.473691 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"pruner-dockercfg-p7bcw" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.474620 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.474686 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.474797 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.475214 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.475681 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.475965 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.476122 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-gxtkn"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.476578 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.477812 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7pmtt"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.478591 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7pmtt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.478631 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-8tsx6"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.480427 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.481075 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.482815 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.483404 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.484054 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.495369 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-rxhdk"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.495691 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-2m72p"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.495913 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4zz5m"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.496184 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-qj494"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.496541 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-qj494" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.497901 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2m72p" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.498008 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.498217 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-8tsx6" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.498265 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4zz5m" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.498547 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-rxhdk" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.501599 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.502888 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.503190 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.503360 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.503457 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.503544 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.503627 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.503730 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.503822 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.503934 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.504037 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.504139 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.504269 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.504281 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.504458 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.504549 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.504644 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.504815 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.504823 4791 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.504984 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.505439 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.505537 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-rwmnp"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.506329 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-r4t2n"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.506839 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.507038 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.507340 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-xdf9g"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.508893 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.510863 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.512171 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-pruner-29522880-928kh"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.512218 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-dvs8h"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.514573 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/45cdb2a5-abaf-4f48-a61f-5ede25bd7bde-audit-policies\") pod \"apiserver-7bbb656c7d-px7q8\" (UID: \"45cdb2a5-abaf-4f48-a61f-5ede25bd7bde\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-px7q8" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.514605 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kl5sf\" (UniqueName: \"kubernetes.io/projected/da348cdf-f9c8-4e7c-b462-7d4979dada11-kube-api-access-kl5sf\") pod \"controller-manager-879f6c89f-w8nfn\" (UID: \"da348cdf-f9c8-4e7c-b462-7d4979dada11\") " pod="openshift-controller-manager/controller-manager-879f6c89f-w8nfn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.514625 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/45cdb2a5-abaf-4f48-a61f-5ede25bd7bde-etcd-client\") pod \"apiserver-7bbb656c7d-px7q8\" (UID: \"45cdb2a5-abaf-4f48-a61f-5ede25bd7bde\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-px7q8" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.514645 4791 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/f5c71b3e-957e-466a-97c6-b114ee0eea13-images\") pod \"machine-api-operator-5694c8668f-rwmnp\" (UID: \"f5c71b3e-957e-466a-97c6-b114ee0eea13\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rwmnp" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.514660 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/45cdb2a5-abaf-4f48-a61f-5ede25bd7bde-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-px7q8\" (UID: \"45cdb2a5-abaf-4f48-a61f-5ede25bd7bde\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-px7q8" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.514676 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/8eb994e6-6e8b-4d0f-84f2-92333f1571c1-audit-dir\") pod \"apiserver-76f77b778f-xdf9g\" (UID: \"8eb994e6-6e8b-4d0f-84f2-92333f1571c1\") " pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.514694 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/f5c71b3e-957e-466a-97c6-b114ee0eea13-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-rwmnp\" (UID: \"f5c71b3e-957e-466a-97c6-b114ee0eea13\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rwmnp" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.514711 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c2fe40bf-a690-426b-af71-d9e8af02202c-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-v7s8c\" (UID: \"c2fe40bf-a690-426b-af71-d9e8af02202c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-v7s8c" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.514730 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8eb994e6-6e8b-4d0f-84f2-92333f1571c1-config\") pod \"apiserver-76f77b778f-xdf9g\" (UID: \"8eb994e6-6e8b-4d0f-84f2-92333f1571c1\") " pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.514745 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/45cdb2a5-abaf-4f48-a61f-5ede25bd7bde-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-px7q8\" (UID: \"45cdb2a5-abaf-4f48-a61f-5ede25bd7bde\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-px7q8" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.514764 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/8eb994e6-6e8b-4d0f-84f2-92333f1571c1-node-pullsecrets\") pod \"apiserver-76f77b778f-xdf9g\" (UID: \"8eb994e6-6e8b-4d0f-84f2-92333f1571c1\") " pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.514781 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: 
\"kubernetes.io/configmap/da9a0a6f-10c0-4fa0-8417-642ff4194832-serviceca\") pod \"image-pruner-29522880-928kh\" (UID: \"da9a0a6f-10c0-4fa0-8417-642ff4194832\") " pod="openshift-image-registry/image-pruner-29522880-928kh" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.514797 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/45cdb2a5-abaf-4f48-a61f-5ede25bd7bde-encryption-config\") pod \"apiserver-7bbb656c7d-px7q8\" (UID: \"45cdb2a5-abaf-4f48-a61f-5ede25bd7bde\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-px7q8" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.514813 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/8eb994e6-6e8b-4d0f-84f2-92333f1571c1-encryption-config\") pod \"apiserver-76f77b778f-xdf9g\" (UID: \"8eb994e6-6e8b-4d0f-84f2-92333f1571c1\") " pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.514833 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c2fe40bf-a690-426b-af71-d9e8af02202c-serving-cert\") pod \"authentication-operator-69f744f599-v7s8c\" (UID: \"c2fe40bf-a690-426b-af71-d9e8af02202c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-v7s8c" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.514852 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/8eb994e6-6e8b-4d0f-84f2-92333f1571c1-image-import-ca\") pod \"apiserver-76f77b778f-xdf9g\" (UID: \"8eb994e6-6e8b-4d0f-84f2-92333f1571c1\") " pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.514871 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d465afa1-5937-4edd-a90f-27cec9705554-trusted-ca\") pod \"console-operator-58897d9998-r4t2n\" (UID: \"d465afa1-5937-4edd-a90f-27cec9705554\") " pod="openshift-console-operator/console-operator-58897d9998-r4t2n" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.514892 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da348cdf-f9c8-4e7c-b462-7d4979dada11-config\") pod \"controller-manager-879f6c89f-w8nfn\" (UID: \"da348cdf-f9c8-4e7c-b462-7d4979dada11\") " pod="openshift-controller-manager/controller-manager-879f6c89f-w8nfn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.514911 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d465afa1-5937-4edd-a90f-27cec9705554-serving-cert\") pod \"console-operator-58897d9998-r4t2n\" (UID: \"d465afa1-5937-4edd-a90f-27cec9705554\") " pod="openshift-console-operator/console-operator-58897d9998-r4t2n" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.514928 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p7npf\" (UniqueName: \"kubernetes.io/projected/d465afa1-5937-4edd-a90f-27cec9705554-kube-api-access-p7npf\") pod \"console-operator-58897d9998-r4t2n\" (UID: 
\"d465afa1-5937-4edd-a90f-27cec9705554\") " pod="openshift-console-operator/console-operator-58897d9998-r4t2n" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.514948 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8eb994e6-6e8b-4d0f-84f2-92333f1571c1-trusted-ca-bundle\") pod \"apiserver-76f77b778f-xdf9g\" (UID: \"8eb994e6-6e8b-4d0f-84f2-92333f1571c1\") " pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.514967 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9hxx\" (UniqueName: \"kubernetes.io/projected/8eb994e6-6e8b-4d0f-84f2-92333f1571c1-kube-api-access-g9hxx\") pod \"apiserver-76f77b778f-xdf9g\" (UID: \"8eb994e6-6e8b-4d0f-84f2-92333f1571c1\") " pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.514988 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p28tm\" (UniqueName: \"kubernetes.io/projected/f5c71b3e-957e-466a-97c6-b114ee0eea13-kube-api-access-p28tm\") pod \"machine-api-operator-5694c8668f-rwmnp\" (UID: \"f5c71b3e-957e-466a-97c6-b114ee0eea13\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rwmnp" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.515009 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8eb994e6-6e8b-4d0f-84f2-92333f1571c1-serving-cert\") pod \"apiserver-76f77b778f-xdf9g\" (UID: \"8eb994e6-6e8b-4d0f-84f2-92333f1571c1\") " pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.515026 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/da348cdf-f9c8-4e7c-b462-7d4979dada11-serving-cert\") pod \"controller-manager-879f6c89f-w8nfn\" (UID: \"da348cdf-f9c8-4e7c-b462-7d4979dada11\") " pod="openshift-controller-manager/controller-manager-879f6c89f-w8nfn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.515062 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26hsc\" (UniqueName: \"kubernetes.io/projected/da9a0a6f-10c0-4fa0-8417-642ff4194832-kube-api-access-26hsc\") pod \"image-pruner-29522880-928kh\" (UID: \"da9a0a6f-10c0-4fa0-8417-642ff4194832\") " pod="openshift-image-registry/image-pruner-29522880-928kh" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.515086 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/45cdb2a5-abaf-4f48-a61f-5ede25bd7bde-audit-dir\") pod \"apiserver-7bbb656c7d-px7q8\" (UID: \"45cdb2a5-abaf-4f48-a61f-5ede25bd7bde\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-px7q8" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.515106 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5c71b3e-957e-466a-97c6-b114ee0eea13-config\") pod \"machine-api-operator-5694c8668f-rwmnp\" (UID: \"f5c71b3e-957e-466a-97c6-b114ee0eea13\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rwmnp" Feb 18 00:36:48 crc 
kubenswrapper[4791]: I0218 00:36:48.515125 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/8eb994e6-6e8b-4d0f-84f2-92333f1571c1-etcd-client\") pod \"apiserver-76f77b778f-xdf9g\" (UID: \"8eb994e6-6e8b-4d0f-84f2-92333f1571c1\") " pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.515143 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/8eb994e6-6e8b-4d0f-84f2-92333f1571c1-etcd-serving-ca\") pod \"apiserver-76f77b778f-xdf9g\" (UID: \"8eb994e6-6e8b-4d0f-84f2-92333f1571c1\") " pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.515183 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dwtnb\" (UniqueName: \"kubernetes.io/projected/45cdb2a5-abaf-4f48-a61f-5ede25bd7bde-kube-api-access-dwtnb\") pod \"apiserver-7bbb656c7d-px7q8\" (UID: \"45cdb2a5-abaf-4f48-a61f-5ede25bd7bde\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-px7q8" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.515205 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/da348cdf-f9c8-4e7c-b462-7d4979dada11-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-w8nfn\" (UID: \"da348cdf-f9c8-4e7c-b462-7d4979dada11\") " pod="openshift-controller-manager/controller-manager-879f6c89f-w8nfn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.515224 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9xcp9\" (UniqueName: \"kubernetes.io/projected/c2fe40bf-a690-426b-af71-d9e8af02202c-kube-api-access-9xcp9\") pod \"authentication-operator-69f744f599-v7s8c\" (UID: \"c2fe40bf-a690-426b-af71-d9e8af02202c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-v7s8c" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.515246 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d465afa1-5937-4edd-a90f-27cec9705554-config\") pod \"console-operator-58897d9998-r4t2n\" (UID: \"d465afa1-5937-4edd-a90f-27cec9705554\") " pod="openshift-console-operator/console-operator-58897d9998-r4t2n" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.515268 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c2fe40bf-a690-426b-af71-d9e8af02202c-service-ca-bundle\") pod \"authentication-operator-69f744f599-v7s8c\" (UID: \"c2fe40bf-a690-426b-af71-d9e8af02202c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-v7s8c" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.515290 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c2fe40bf-a690-426b-af71-d9e8af02202c-config\") pod \"authentication-operator-69f744f599-v7s8c\" (UID: \"c2fe40bf-a690-426b-af71-d9e8af02202c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-v7s8c" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.515309 4791 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/45cdb2a5-abaf-4f48-a61f-5ede25bd7bde-serving-cert\") pod \"apiserver-7bbb656c7d-px7q8\" (UID: \"45cdb2a5-abaf-4f48-a61f-5ede25bd7bde\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-px7q8" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.515328 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/da348cdf-f9c8-4e7c-b462-7d4979dada11-client-ca\") pod \"controller-manager-879f6c89f-w8nfn\" (UID: \"da348cdf-f9c8-4e7c-b462-7d4979dada11\") " pod="openshift-controller-manager/controller-manager-879f6c89f-w8nfn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.515361 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/8eb994e6-6e8b-4d0f-84f2-92333f1571c1-audit\") pod \"apiserver-76f77b778f-xdf9g\" (UID: \"8eb994e6-6e8b-4d0f-84f2-92333f1571c1\") " pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.516598 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-w8nfn"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.516704 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-dvs8h" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.517136 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.517465 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.517671 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.517803 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7pmtt"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.523628 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.524001 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.524119 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.524240 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.524386 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.524501 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.524624 4791 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.526132 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-px7q8"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.527345 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-2tcll"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.527952 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2tcll" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.529609 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-rfkpk"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.531001 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.531471 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.531487 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.531752 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.531831 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.531863 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.531882 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.532014 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.532044 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.532124 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.532253 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.532314 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.532339 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.532434 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Feb 18 00:36:48 crc kubenswrapper[4791]: 
I0218 00:36:48.541082 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.541593 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.542507 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.543101 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.543393 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.544084 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.545226 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xhnwk"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.545313 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.546115 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-rfkpk" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.546928 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-82mpq"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.552121 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xhnwk" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.552125 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.552758 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-k8nrp"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.552958 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-82mpq" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.553396 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-k8nrp" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.553539 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-82c2d"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.554124 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-82c2d" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.554229 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.554512 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dnxrh"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.554926 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dnxrh" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.555420 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-9cr2f"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.555891 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-9cr2f" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.556300 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-t5878"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.556725 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-t5878" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.557122 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-jhdhd"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.562567 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.562763 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-xlh9r"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.562901 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.563543 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xlh9r" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.563872 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-jhdhd" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.564244 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-fpmgd"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.570850 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.578002 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29522910-zlz2k"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.578374 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fpmgd" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.578392 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-pxzzf"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.578455 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29522910-zlz2k" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.579190 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9n2vw"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.579270 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-pxzzf" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.579517 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-2ph78"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.579566 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9n2vw" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.579805 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7swxh"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.580075 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-vlrzr"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.580287 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-2ph78" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.580377 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7swxh" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.580653 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-plj59"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.580964 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-tk98h"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.580985 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-vlrzr" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.581019 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-plj59" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.581342 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-tk98h" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.583352 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-t5rkn"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.585119 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-t5rkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.586883 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hb7bm"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.587466 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hb7bm" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.589013 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.590051 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-2m72p"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.590704 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-tlnpm"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.591793 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-r7vng"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.592497 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.592827 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-tlnpm" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.596863 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-qj494"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.600599 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-2mrz5"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.601335 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-v7s8c"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.601356 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-82c2d"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.603432 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-k8nrp"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.603735 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-t5878"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.605633 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-82mpq"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.607745 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.609984 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4zz5m"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.610031 
4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-rxhdk"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.610042 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-fpmgd"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.610649 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-2mrz5" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.614877 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-gxtkn"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.616374 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x7qb5\" (UniqueName: \"kubernetes.io/projected/12d0d3a0-841c-4a06-aad2-c52c33800392-kube-api-access-x7qb5\") pod \"machine-approver-56656f9798-8tsx6\" (UID: \"12d0d3a0-841c-4a06-aad2-c52c33800392\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-8tsx6" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.616402 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/86198f6e-9eb6-4058-bf71-543f8283d54c-metrics-tls\") pod \"ingress-operator-5b745b69d9-xlh9r\" (UID: \"86198f6e-9eb6-4058-bf71-543f8283d54c\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xlh9r" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.616428 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9xcp9\" (UniqueName: \"kubernetes.io/projected/c2fe40bf-a690-426b-af71-d9e8af02202c-kube-api-access-9xcp9\") pod \"authentication-operator-69f744f599-v7s8c\" (UID: \"c2fe40bf-a690-426b-af71-d9e8af02202c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-v7s8c" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.616453 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d465afa1-5937-4edd-a90f-27cec9705554-config\") pod \"console-operator-58897d9998-r4t2n\" (UID: \"d465afa1-5937-4edd-a90f-27cec9705554\") " pod="openshift-console-operator/console-operator-58897d9998-r4t2n" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.616469 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/12d0d3a0-841c-4a06-aad2-c52c33800392-config\") pod \"machine-approver-56656f9798-8tsx6\" (UID: \"12d0d3a0-841c-4a06-aad2-c52c33800392\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-8tsx6" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.616485 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m6wqn\" (UniqueName: \"kubernetes.io/projected/0c4d0046-4e3a-45c8-9d0c-185714e546d0-kube-api-access-m6wqn\") pod \"olm-operator-6b444d44fb-dnxrh\" (UID: \"0c4d0046-4e3a-45c8-9d0c-185714e546d0\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dnxrh" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.616501 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7kgdr\" (UniqueName: 
\"kubernetes.io/projected/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-kube-api-access-7kgdr\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.616516 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/a7563a68-6d05-4beb-8d19-ab941578e67e-images\") pod \"machine-config-operator-74547568cd-2tcll\" (UID: \"a7563a68-6d05-4beb-8d19-ab941578e67e\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2tcll" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.616533 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qmx59\" (UniqueName: \"kubernetes.io/projected/929ec2d3-2a0e-4031-9a2a-08b9744e2b40-kube-api-access-qmx59\") pod \"openshift-config-operator-7777fb866f-dvs8h\" (UID: \"929ec2d3-2a0e-4031-9a2a-08b9744e2b40\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-dvs8h" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.616548 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/e1e9abd7-81f2-423d-8a79-c4102461680d-oauth-serving-cert\") pod \"console-f9d7485db-rxhdk\" (UID: \"e1e9abd7-81f2-423d-8a79-c4102461680d\") " pod="openshift-console/console-f9d7485db-rxhdk" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.616590 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c2fe40bf-a690-426b-af71-d9e8af02202c-service-ca-bundle\") pod \"authentication-operator-69f744f599-v7s8c\" (UID: \"c2fe40bf-a690-426b-af71-d9e8af02202c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-v7s8c" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.616606 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/0c4d0046-4e3a-45c8-9d0c-185714e546d0-profile-collector-cert\") pod \"olm-operator-6b444d44fb-dnxrh\" (UID: \"0c4d0046-4e3a-45c8-9d0c-185714e546d0\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dnxrh" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.616620 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.616635 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xq7q4\" (UniqueName: \"kubernetes.io/projected/cf6cb538-0c36-4140-a49f-2fcf19d46169-kube-api-access-xq7q4\") pod \"downloads-7954f5f757-qj494\" (UID: \"cf6cb538-0c36-4140-a49f-2fcf19d46169\") " pod="openshift-console/downloads-7954f5f757-qj494" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.616650 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/c2fe40bf-a690-426b-af71-d9e8af02202c-config\") pod \"authentication-operator-69f744f599-v7s8c\" (UID: \"c2fe40bf-a690-426b-af71-d9e8af02202c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-v7s8c" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.616665 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/45cdb2a5-abaf-4f48-a61f-5ede25bd7bde-serving-cert\") pod \"apiserver-7bbb656c7d-px7q8\" (UID: \"45cdb2a5-abaf-4f48-a61f-5ede25bd7bde\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-px7q8" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.616681 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/da348cdf-f9c8-4e7c-b462-7d4979dada11-client-ca\") pod \"controller-manager-879f6c89f-w8nfn\" (UID: \"da348cdf-f9c8-4e7c-b462-7d4979dada11\") " pod="openshift-controller-manager/controller-manager-879f6c89f-w8nfn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.616696 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/7fa9bca6-1980-4a13-9c30-5d693c51b72c-tmpfs\") pod \"packageserver-d55dfcdfc-82mpq\" (UID: \"7fa9bca6-1980-4a13-9c30-5d693c51b72c\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-82mpq" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.616710 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.616727 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/a4f1858f-9974-4098-a264-b981c587623b-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-82c2d\" (UID: \"a4f1858f-9974-4098-a264-b981c587623b\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-82c2d" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.616745 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dhj2k\" (UniqueName: \"kubernetes.io/projected/a4f1858f-9974-4098-a264-b981c587623b-kube-api-access-dhj2k\") pod \"control-plane-machine-set-operator-78cbb6b69f-82c2d\" (UID: \"a4f1858f-9974-4098-a264-b981c587623b\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-82c2d" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.616759 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/e1e9abd7-81f2-423d-8a79-c4102461680d-console-config\") pod \"console-f9d7485db-rxhdk\" (UID: \"e1e9abd7-81f2-423d-8a79-c4102461680d\") " pod="openshift-console/console-f9d7485db-rxhdk" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.616775 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pxxj8\" (UniqueName: 
\"kubernetes.io/projected/668a019f-7e1c-4aca-a07c-3f22308a66c2-kube-api-access-pxxj8\") pod \"openshift-apiserver-operator-796bbdcf4f-4zz5m\" (UID: \"668a019f-7e1c-4aca-a07c-3f22308a66c2\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4zz5m" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.616789 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7fa9bca6-1980-4a13-9c30-5d693c51b72c-webhook-cert\") pod \"packageserver-d55dfcdfc-82mpq\" (UID: \"7fa9bca6-1980-4a13-9c30-5d693c51b72c\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-82mpq" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.616813 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ksj7w\" (UniqueName: \"kubernetes.io/projected/e1e9abd7-81f2-423d-8a79-c4102461680d-kube-api-access-ksj7w\") pod \"console-f9d7485db-rxhdk\" (UID: \"e1e9abd7-81f2-423d-8a79-c4102461680d\") " pod="openshift-console/console-f9d7485db-rxhdk" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.616831 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/8eb994e6-6e8b-4d0f-84f2-92333f1571c1-audit\") pod \"apiserver-76f77b778f-xdf9g\" (UID: \"8eb994e6-6e8b-4d0f-84f2-92333f1571c1\") " pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.616846 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c1261ad9-b566-419e-ad9a-e7b361edd24a-serving-cert\") pod \"route-controller-manager-6576b87f9c-2m72p\" (UID: \"c1261ad9-b566-419e-ad9a-e7b361edd24a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2m72p" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.616864 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/86198f6e-9eb6-4058-bf71-543f8283d54c-bound-sa-token\") pod \"ingress-operator-5b745b69d9-xlh9r\" (UID: \"86198f6e-9eb6-4058-bf71-543f8283d54c\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xlh9r" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.616892 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.616908 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.616924 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5rcr2\" (UniqueName: 
\"kubernetes.io/projected/a7563a68-6d05-4beb-8d19-ab941578e67e-kube-api-access-5rcr2\") pod \"machine-config-operator-74547568cd-2tcll\" (UID: \"a7563a68-6d05-4beb-8d19-ab941578e67e\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2tcll" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.616941 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.616958 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/45cdb2a5-abaf-4f48-a61f-5ede25bd7bde-audit-policies\") pod \"apiserver-7bbb656c7d-px7q8\" (UID: \"45cdb2a5-abaf-4f48-a61f-5ede25bd7bde\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-px7q8" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.616972 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kl5sf\" (UniqueName: \"kubernetes.io/projected/da348cdf-f9c8-4e7c-b462-7d4979dada11-kube-api-access-kl5sf\") pod \"controller-manager-879f6c89f-w8nfn\" (UID: \"da348cdf-f9c8-4e7c-b462-7d4979dada11\") " pod="openshift-controller-manager/controller-manager-879f6c89f-w8nfn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.616986 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/45cdb2a5-abaf-4f48-a61f-5ede25bd7bde-etcd-client\") pod \"apiserver-7bbb656c7d-px7q8\" (UID: \"45cdb2a5-abaf-4f48-a61f-5ede25bd7bde\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-px7q8" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617002 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6edc9cce-f90d-4bff-bc33-43ff4051944f-config\") pod \"service-ca-operator-777779d784-jhdhd\" (UID: \"6edc9cce-f90d-4bff-bc33-43ff4051944f\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jhdhd" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617018 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/0c4d0046-4e3a-45c8-9d0c-185714e546d0-srv-cert\") pod \"olm-operator-6b444d44fb-dnxrh\" (UID: \"0c4d0046-4e3a-45c8-9d0c-185714e546d0\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dnxrh" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617032 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fnkl7\" (UniqueName: \"kubernetes.io/projected/6edc9cce-f90d-4bff-bc33-43ff4051944f-kube-api-access-fnkl7\") pod \"service-ca-operator-777779d784-jhdhd\" (UID: \"6edc9cce-f90d-4bff-bc33-43ff4051944f\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jhdhd" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617047 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: 
\"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617063 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/e1e9abd7-81f2-423d-8a79-c4102461680d-console-serving-cert\") pod \"console-f9d7485db-rxhdk\" (UID: \"e1e9abd7-81f2-423d-8a79-c4102461680d\") " pod="openshift-console/console-f9d7485db-rxhdk" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617080 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/f5c71b3e-957e-466a-97c6-b114ee0eea13-images\") pod \"machine-api-operator-5694c8668f-rwmnp\" (UID: \"f5c71b3e-957e-466a-97c6-b114ee0eea13\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rwmnp" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617095 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/45cdb2a5-abaf-4f48-a61f-5ede25bd7bde-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-px7q8\" (UID: \"45cdb2a5-abaf-4f48-a61f-5ede25bd7bde\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-px7q8" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617116 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617132 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7fa9bca6-1980-4a13-9c30-5d693c51b72c-apiservice-cert\") pod \"packageserver-d55dfcdfc-82mpq\" (UID: \"7fa9bca6-1980-4a13-9c30-5d693c51b72c\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-82mpq" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617148 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/f5c71b3e-957e-466a-97c6-b114ee0eea13-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-rwmnp\" (UID: \"f5c71b3e-957e-466a-97c6-b114ee0eea13\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rwmnp" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617180 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c2fe40bf-a690-426b-af71-d9e8af02202c-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-v7s8c\" (UID: \"c2fe40bf-a690-426b-af71-d9e8af02202c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-v7s8c" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617199 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jm846\" (UniqueName: \"kubernetes.io/projected/523f9b10-74b8-45bb-b400-054b069286dc-kube-api-access-jm846\") pod 
\"multus-admission-controller-857f4d67dd-t5878\" (UID: \"523f9b10-74b8-45bb-b400-054b069286dc\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-t5878" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617217 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/8eb994e6-6e8b-4d0f-84f2-92333f1571c1-audit-dir\") pod \"apiserver-76f77b778f-xdf9g\" (UID: \"8eb994e6-6e8b-4d0f-84f2-92333f1571c1\") " pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617233 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8eb994e6-6e8b-4d0f-84f2-92333f1571c1-config\") pod \"apiserver-76f77b778f-xdf9g\" (UID: \"8eb994e6-6e8b-4d0f-84f2-92333f1571c1\") " pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617250 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/45cdb2a5-abaf-4f48-a61f-5ede25bd7bde-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-px7q8\" (UID: \"45cdb2a5-abaf-4f48-a61f-5ede25bd7bde\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-px7q8" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617266 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/668a019f-7e1c-4aca-a07c-3f22308a66c2-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-4zz5m\" (UID: \"668a019f-7e1c-4aca-a07c-3f22308a66c2\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4zz5m" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617281 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617297 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/86198f6e-9eb6-4058-bf71-543f8283d54c-trusted-ca\") pod \"ingress-operator-5b745b69d9-xlh9r\" (UID: \"86198f6e-9eb6-4058-bf71-543f8283d54c\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xlh9r" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617314 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9xm7\" (UniqueName: \"kubernetes.io/projected/b06fac8f-e399-462f-9665-35781318a69d-kube-api-access-g9xm7\") pod \"openshift-controller-manager-operator-756b6f6bc6-xhnwk\" (UID: \"b06fac8f-e399-462f-9665-35781318a69d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xhnwk" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617331 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/da9a0a6f-10c0-4fa0-8417-642ff4194832-serviceca\") pod \"image-pruner-29522880-928kh\" (UID: \"da9a0a6f-10c0-4fa0-8417-642ff4194832\") " 
pod="openshift-image-registry/image-pruner-29522880-928kh" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617346 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6edc9cce-f90d-4bff-bc33-43ff4051944f-serving-cert\") pod \"service-ca-operator-777779d784-jhdhd\" (UID: \"6edc9cce-f90d-4bff-bc33-43ff4051944f\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jhdhd" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617365 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bv26x\" (UniqueName: \"kubernetes.io/projected/73644233-2e9a-458d-8513-f56dddce1c55-kube-api-access-bv26x\") pod \"machine-config-controller-84d6567774-fpmgd\" (UID: \"73644233-2e9a-458d-8513-f56dddce1c55\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fpmgd" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617380 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8tqvq\" (UniqueName: \"kubernetes.io/projected/c1261ad9-b566-419e-ad9a-e7b361edd24a-kube-api-access-8tqvq\") pod \"route-controller-manager-6576b87f9c-2m72p\" (UID: \"c1261ad9-b566-419e-ad9a-e7b361edd24a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2m72p" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617397 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/8eb994e6-6e8b-4d0f-84f2-92333f1571c1-node-pullsecrets\") pod \"apiserver-76f77b778f-xdf9g\" (UID: \"8eb994e6-6e8b-4d0f-84f2-92333f1571c1\") " pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617414 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617430 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4b4w\" (UniqueName: \"kubernetes.io/projected/86198f6e-9eb6-4058-bf71-543f8283d54c-kube-api-access-q4b4w\") pod \"ingress-operator-5b745b69d9-xlh9r\" (UID: \"86198f6e-9eb6-4058-bf71-543f8283d54c\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xlh9r" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617446 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/45cdb2a5-abaf-4f48-a61f-5ede25bd7bde-encryption-config\") pod \"apiserver-7bbb656c7d-px7q8\" (UID: \"45cdb2a5-abaf-4f48-a61f-5ede25bd7bde\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-px7q8" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617461 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-audit-policies\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617476 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617493 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/8eb994e6-6e8b-4d0f-84f2-92333f1571c1-encryption-config\") pod \"apiserver-76f77b778f-xdf9g\" (UID: \"8eb994e6-6e8b-4d0f-84f2-92333f1571c1\") " pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617510 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c2fe40bf-a690-426b-af71-d9e8af02202c-serving-cert\") pod \"authentication-operator-69f744f599-v7s8c\" (UID: \"c2fe40bf-a690-426b-af71-d9e8af02202c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-v7s8c" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617526 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e1e9abd7-81f2-423d-8a79-c4102461680d-trusted-ca-bundle\") pod \"console-f9d7485db-rxhdk\" (UID: \"e1e9abd7-81f2-423d-8a79-c4102461680d\") " pod="openshift-console/console-f9d7485db-rxhdk" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617541 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-audit-dir\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617557 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a7563a68-6d05-4beb-8d19-ab941578e67e-auth-proxy-config\") pod \"machine-config-operator-74547568cd-2tcll\" (UID: \"a7563a68-6d05-4beb-8d19-ab941578e67e\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2tcll" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617574 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b06fac8f-e399-462f-9665-35781318a69d-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-xhnwk\" (UID: \"b06fac8f-e399-462f-9665-35781318a69d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xhnwk" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617590 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/8eb994e6-6e8b-4d0f-84f2-92333f1571c1-image-import-ca\") pod \"apiserver-76f77b778f-xdf9g\" (UID: \"8eb994e6-6e8b-4d0f-84f2-92333f1571c1\") " pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:36:48 crc 
kubenswrapper[4791]: I0218 00:36:48.617605 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da348cdf-f9c8-4e7c-b462-7d4979dada11-config\") pod \"controller-manager-879f6c89f-w8nfn\" (UID: \"da348cdf-f9c8-4e7c-b462-7d4979dada11\") " pod="openshift-controller-manager/controller-manager-879f6c89f-w8nfn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617621 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d465afa1-5937-4edd-a90f-27cec9705554-serving-cert\") pod \"console-operator-58897d9998-r4t2n\" (UID: \"d465afa1-5937-4edd-a90f-27cec9705554\") " pod="openshift-console-operator/console-operator-58897d9998-r4t2n" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617635 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d465afa1-5937-4edd-a90f-27cec9705554-trusted-ca\") pod \"console-operator-58897d9998-r4t2n\" (UID: \"d465afa1-5937-4edd-a90f-27cec9705554\") " pod="openshift-console-operator/console-operator-58897d9998-r4t2n" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617652 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/668a019f-7e1c-4aca-a07c-3f22308a66c2-config\") pod \"openshift-apiserver-operator-796bbdcf4f-4zz5m\" (UID: \"668a019f-7e1c-4aca-a07c-3f22308a66c2\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4zz5m" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617670 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/929ec2d3-2a0e-4031-9a2a-08b9744e2b40-available-featuregates\") pod \"openshift-config-operator-7777fb866f-dvs8h\" (UID: \"929ec2d3-2a0e-4031-9a2a-08b9744e2b40\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-dvs8h" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617687 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p7npf\" (UniqueName: \"kubernetes.io/projected/d465afa1-5937-4edd-a90f-27cec9705554-kube-api-access-p7npf\") pod \"console-operator-58897d9998-r4t2n\" (UID: \"d465afa1-5937-4edd-a90f-27cec9705554\") " pod="openshift-console-operator/console-operator-58897d9998-r4t2n" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617703 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/e1e9abd7-81f2-423d-8a79-c4102461680d-console-oauth-config\") pod \"console-f9d7485db-rxhdk\" (UID: \"e1e9abd7-81f2-423d-8a79-c4102461680d\") " pod="openshift-console/console-f9d7485db-rxhdk" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617719 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tcjwn\" (UniqueName: \"kubernetes.io/projected/e0149f47-a655-434f-982e-b8872ea180c4-kube-api-access-tcjwn\") pod \"migrator-59844c95c7-rfkpk\" (UID: \"e0149f47-a655-434f-982e-b8872ea180c4\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-rfkpk" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617737 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8eb994e6-6e8b-4d0f-84f2-92333f1571c1-trusted-ca-bundle\") pod \"apiserver-76f77b778f-xdf9g\" (UID: \"8eb994e6-6e8b-4d0f-84f2-92333f1571c1\") " pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617754 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617771 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8eb994e6-6e8b-4d0f-84f2-92333f1571c1-serving-cert\") pod \"apiserver-76f77b778f-xdf9g\" (UID: \"8eb994e6-6e8b-4d0f-84f2-92333f1571c1\") " pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617788 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9hxx\" (UniqueName: \"kubernetes.io/projected/8eb994e6-6e8b-4d0f-84f2-92333f1571c1-kube-api-access-g9hxx\") pod \"apiserver-76f77b778f-xdf9g\" (UID: \"8eb994e6-6e8b-4d0f-84f2-92333f1571c1\") " pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617804 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p28tm\" (UniqueName: \"kubernetes.io/projected/f5c71b3e-957e-466a-97c6-b114ee0eea13-kube-api-access-p28tm\") pod \"machine-api-operator-5694c8668f-rwmnp\" (UID: \"f5c71b3e-957e-466a-97c6-b114ee0eea13\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rwmnp" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617821 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c1261ad9-b566-419e-ad9a-e7b361edd24a-config\") pod \"route-controller-manager-6576b87f9c-2m72p\" (UID: \"c1261ad9-b566-419e-ad9a-e7b361edd24a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2m72p" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617836 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/e1e9abd7-81f2-423d-8a79-c4102461680d-service-ca\") pod \"console-f9d7485db-rxhdk\" (UID: \"e1e9abd7-81f2-423d-8a79-c4102461680d\") " pod="openshift-console/console-f9d7485db-rxhdk" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617852 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/da348cdf-f9c8-4e7c-b462-7d4979dada11-serving-cert\") pod \"controller-manager-879f6c89f-w8nfn\" (UID: \"da348cdf-f9c8-4e7c-b462-7d4979dada11\") " pod="openshift-controller-manager/controller-manager-879f6c89f-w8nfn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617871 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/40569d9f-89af-4f68-855d-88d881e71de5-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-9cr2f\" (UID: 
\"40569d9f-89af-4f68-855d-88d881e71de5\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-9cr2f" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617892 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0084382-ff2c-4c0d-8460-b0694790a78b-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-7pmtt\" (UID: \"a0084382-ff2c-4c0d-8460-b0694790a78b\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7pmtt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617908 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gw4zr\" (UniqueName: \"kubernetes.io/projected/a0084382-ff2c-4c0d-8460-b0694790a78b-kube-api-access-gw4zr\") pod \"cluster-samples-operator-665b6dd947-7pmtt\" (UID: \"a0084382-ff2c-4c0d-8460-b0694790a78b\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7pmtt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617925 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26hsc\" (UniqueName: \"kubernetes.io/projected/da9a0a6f-10c0-4fa0-8417-642ff4194832-kube-api-access-26hsc\") pod \"image-pruner-29522880-928kh\" (UID: \"da9a0a6f-10c0-4fa0-8417-642ff4194832\") " pod="openshift-image-registry/image-pruner-29522880-928kh" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617939 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/45cdb2a5-abaf-4f48-a61f-5ede25bd7bde-audit-dir\") pod \"apiserver-7bbb656c7d-px7q8\" (UID: \"45cdb2a5-abaf-4f48-a61f-5ede25bd7bde\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-px7q8" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617956 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40569d9f-89af-4f68-855d-88d881e71de5-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-9cr2f\" (UID: \"40569d9f-89af-4f68-855d-88d881e71de5\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-9cr2f" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617972 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b2dw4\" (UniqueName: \"kubernetes.io/projected/7fa9bca6-1980-4a13-9c30-5d693c51b72c-kube-api-access-b2dw4\") pod \"packageserver-d55dfcdfc-82mpq\" (UID: \"7fa9bca6-1980-4a13-9c30-5d693c51b72c\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-82mpq" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.617988 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p42rv\" (UniqueName: \"kubernetes.io/projected/40569d9f-89af-4f68-855d-88d881e71de5-kube-api-access-p42rv\") pod \"kube-storage-version-migrator-operator-b67b599dd-9cr2f\" (UID: \"40569d9f-89af-4f68-855d-88d881e71de5\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-9cr2f" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.618004 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/b06fac8f-e399-462f-9665-35781318a69d-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-xhnwk\" (UID: \"b06fac8f-e399-462f-9665-35781318a69d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xhnwk" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.618020 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5c71b3e-957e-466a-97c6-b114ee0eea13-config\") pod \"machine-api-operator-5694c8668f-rwmnp\" (UID: \"f5c71b3e-957e-466a-97c6-b114ee0eea13\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rwmnp" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.618036 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/8eb994e6-6e8b-4d0f-84f2-92333f1571c1-etcd-client\") pod \"apiserver-76f77b778f-xdf9g\" (UID: \"8eb994e6-6e8b-4d0f-84f2-92333f1571c1\") " pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.618053 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/12d0d3a0-841c-4a06-aad2-c52c33800392-auth-proxy-config\") pod \"machine-approver-56656f9798-8tsx6\" (UID: \"12d0d3a0-841c-4a06-aad2-c52c33800392\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-8tsx6" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.618069 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/12d0d3a0-841c-4a06-aad2-c52c33800392-machine-approver-tls\") pod \"machine-approver-56656f9798-8tsx6\" (UID: \"12d0d3a0-841c-4a06-aad2-c52c33800392\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-8tsx6" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.618085 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/73644233-2e9a-458d-8513-f56dddce1c55-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-fpmgd\" (UID: \"73644233-2e9a-458d-8513-f56dddce1c55\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fpmgd" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.618102 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gq7cw\" (UniqueName: \"kubernetes.io/projected/e7603b81-f0fd-4f84-980d-672efe28d72f-kube-api-access-gq7cw\") pod \"package-server-manager-789f6589d5-k8nrp\" (UID: \"e7603b81-f0fd-4f84-980d-672efe28d72f\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-k8nrp" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.618116 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/929ec2d3-2a0e-4031-9a2a-08b9744e2b40-serving-cert\") pod \"openshift-config-operator-7777fb866f-dvs8h\" (UID: \"929ec2d3-2a0e-4031-9a2a-08b9744e2b40\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-dvs8h" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.618131 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/e7603b81-f0fd-4f84-980d-672efe28d72f-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-k8nrp\" (UID: \"e7603b81-f0fd-4f84-980d-672efe28d72f\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-k8nrp" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.618148 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/8eb994e6-6e8b-4d0f-84f2-92333f1571c1-etcd-serving-ca\") pod \"apiserver-76f77b778f-xdf9g\" (UID: \"8eb994e6-6e8b-4d0f-84f2-92333f1571c1\") " pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.618182 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dwtnb\" (UniqueName: \"kubernetes.io/projected/45cdb2a5-abaf-4f48-a61f-5ede25bd7bde-kube-api-access-dwtnb\") pod \"apiserver-7bbb656c7d-px7q8\" (UID: \"45cdb2a5-abaf-4f48-a61f-5ede25bd7bde\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-px7q8" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.618199 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/73644233-2e9a-458d-8513-f56dddce1c55-proxy-tls\") pod \"machine-config-controller-84d6567774-fpmgd\" (UID: \"73644233-2e9a-458d-8513-f56dddce1c55\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fpmgd" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.618216 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/da348cdf-f9c8-4e7c-b462-7d4979dada11-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-w8nfn\" (UID: \"da348cdf-f9c8-4e7c-b462-7d4979dada11\") " pod="openshift-controller-manager/controller-manager-879f6c89f-w8nfn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.618232 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c1261ad9-b566-419e-ad9a-e7b361edd24a-client-ca\") pod \"route-controller-manager-6576b87f9c-2m72p\" (UID: \"c1261ad9-b566-419e-ad9a-e7b361edd24a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2m72p" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.618248 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a7563a68-6d05-4beb-8d19-ab941578e67e-proxy-tls\") pod \"machine-config-operator-74547568cd-2tcll\" (UID: \"a7563a68-6d05-4beb-8d19-ab941578e67e\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2tcll" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.618266 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/523f9b10-74b8-45bb-b400-054b069286dc-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-t5878\" (UID: \"523f9b10-74b8-45bb-b400-054b069286dc\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-t5878" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.623929 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/d465afa1-5937-4edd-a90f-27cec9705554-config\") pod \"console-operator-58897d9998-r4t2n\" (UID: \"d465afa1-5937-4edd-a90f-27cec9705554\") " pod="openshift-console-operator/console-operator-58897d9998-r4t2n" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.627422 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-2tcll"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.627479 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dnxrh"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.627490 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-rfkpk"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.628095 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c2fe40bf-a690-426b-af71-d9e8af02202c-service-ca-bundle\") pod \"authentication-operator-69f744f599-v7s8c\" (UID: \"c2fe40bf-a690-426b-af71-d9e8af02202c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-v7s8c" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.628633 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c2fe40bf-a690-426b-af71-d9e8af02202c-config\") pod \"authentication-operator-69f744f599-v7s8c\" (UID: \"c2fe40bf-a690-426b-af71-d9e8af02202c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-v7s8c" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.628934 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.636212 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/da348cdf-f9c8-4e7c-b462-7d4979dada11-client-ca\") pod \"controller-manager-879f6c89f-w8nfn\" (UID: \"da348cdf-f9c8-4e7c-b462-7d4979dada11\") " pod="openshift-controller-manager/controller-manager-879f6c89f-w8nfn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.637347 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/45cdb2a5-abaf-4f48-a61f-5ede25bd7bde-serving-cert\") pod \"apiserver-7bbb656c7d-px7q8\" (UID: \"45cdb2a5-abaf-4f48-a61f-5ede25bd7bde\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-px7q8" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.638118 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/8eb994e6-6e8b-4d0f-84f2-92333f1571c1-encryption-config\") pod \"apiserver-76f77b778f-xdf9g\" (UID: \"8eb994e6-6e8b-4d0f-84f2-92333f1571c1\") " pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.638469 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/da348cdf-f9c8-4e7c-b462-7d4979dada11-serving-cert\") pod \"controller-manager-879f6c89f-w8nfn\" (UID: \"da348cdf-f9c8-4e7c-b462-7d4979dada11\") " pod="openshift-controller-manager/controller-manager-879f6c89f-w8nfn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.638586 4791 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/8eb994e6-6e8b-4d0f-84f2-92333f1571c1-audit-dir\") pod \"apiserver-76f77b778f-xdf9g\" (UID: \"8eb994e6-6e8b-4d0f-84f2-92333f1571c1\") " pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.638689 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/45cdb2a5-abaf-4f48-a61f-5ede25bd7bde-audit-policies\") pod \"apiserver-7bbb656c7d-px7q8\" (UID: \"45cdb2a5-abaf-4f48-a61f-5ede25bd7bde\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-px7q8" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.638750 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/8eb994e6-6e8b-4d0f-84f2-92333f1571c1-node-pullsecrets\") pod \"apiserver-76f77b778f-xdf9g\" (UID: \"8eb994e6-6e8b-4d0f-84f2-92333f1571c1\") " pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.638786 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/45cdb2a5-abaf-4f48-a61f-5ede25bd7bde-audit-dir\") pod \"apiserver-7bbb656c7d-px7q8\" (UID: \"45cdb2a5-abaf-4f48-a61f-5ede25bd7bde\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-px7q8" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.639210 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/45cdb2a5-abaf-4f48-a61f-5ede25bd7bde-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-px7q8\" (UID: \"45cdb2a5-abaf-4f48-a61f-5ede25bd7bde\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-px7q8" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.639801 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8eb994e6-6e8b-4d0f-84f2-92333f1571c1-config\") pod \"apiserver-76f77b778f-xdf9g\" (UID: \"8eb994e6-6e8b-4d0f-84f2-92333f1571c1\") " pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.639806 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5c71b3e-957e-466a-97c6-b114ee0eea13-config\") pod \"machine-api-operator-5694c8668f-rwmnp\" (UID: \"f5c71b3e-957e-466a-97c6-b114ee0eea13\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rwmnp" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.640649 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/45cdb2a5-abaf-4f48-a61f-5ede25bd7bde-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-px7q8\" (UID: \"45cdb2a5-abaf-4f48-a61f-5ede25bd7bde\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-px7q8" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.640793 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8eb994e6-6e8b-4d0f-84f2-92333f1571c1-trusted-ca-bundle\") pod \"apiserver-76f77b778f-xdf9g\" (UID: \"8eb994e6-6e8b-4d0f-84f2-92333f1571c1\") " pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.643900 4791 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/da9a0a6f-10c0-4fa0-8417-642ff4194832-serviceca\") pod \"image-pruner-29522880-928kh\" (UID: \"da9a0a6f-10c0-4fa0-8417-642ff4194832\") " pod="openshift-image-registry/image-pruner-29522880-928kh" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.644772 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d465afa1-5937-4edd-a90f-27cec9705554-serving-cert\") pod \"console-operator-58897d9998-r4t2n\" (UID: \"d465afa1-5937-4edd-a90f-27cec9705554\") " pod="openshift-console-operator/console-operator-58897d9998-r4t2n" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.645003 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8eb994e6-6e8b-4d0f-84f2-92333f1571c1-serving-cert\") pod \"apiserver-76f77b778f-xdf9g\" (UID: \"8eb994e6-6e8b-4d0f-84f2-92333f1571c1\") " pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.645391 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.645537 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/45cdb2a5-abaf-4f48-a61f-5ede25bd7bde-etcd-client\") pod \"apiserver-7bbb656c7d-px7q8\" (UID: \"45cdb2a5-abaf-4f48-a61f-5ede25bd7bde\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-px7q8" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.646133 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/f5c71b3e-957e-466a-97c6-b114ee0eea13-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-rwmnp\" (UID: \"f5c71b3e-957e-466a-97c6-b114ee0eea13\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rwmnp" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.646264 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/8eb994e6-6e8b-4d0f-84f2-92333f1571c1-image-import-ca\") pod \"apiserver-76f77b778f-xdf9g\" (UID: \"8eb994e6-6e8b-4d0f-84f2-92333f1571c1\") " pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.647103 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da348cdf-f9c8-4e7c-b462-7d4979dada11-config\") pod \"controller-manager-879f6c89f-w8nfn\" (UID: \"da348cdf-f9c8-4e7c-b462-7d4979dada11\") " pod="openshift-controller-manager/controller-manager-879f6c89f-w8nfn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.647446 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/8eb994e6-6e8b-4d0f-84f2-92333f1571c1-etcd-client\") pod \"apiserver-76f77b778f-xdf9g\" (UID: \"8eb994e6-6e8b-4d0f-84f2-92333f1571c1\") " pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.653017 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d465afa1-5937-4edd-a90f-27cec9705554-trusted-ca\") pod \"console-operator-58897d9998-r4t2n\" (UID: 
\"d465afa1-5937-4edd-a90f-27cec9705554\") " pod="openshift-console-operator/console-operator-58897d9998-r4t2n" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.653444 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/45cdb2a5-abaf-4f48-a61f-5ede25bd7bde-encryption-config\") pod \"apiserver-7bbb656c7d-px7q8\" (UID: \"45cdb2a5-abaf-4f48-a61f-5ede25bd7bde\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-px7q8" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.653515 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/8eb994e6-6e8b-4d0f-84f2-92333f1571c1-etcd-serving-ca\") pod \"apiserver-76f77b778f-xdf9g\" (UID: \"8eb994e6-6e8b-4d0f-84f2-92333f1571c1\") " pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.653737 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/f5c71b3e-957e-466a-97c6-b114ee0eea13-images\") pod \"machine-api-operator-5694c8668f-rwmnp\" (UID: \"f5c71b3e-957e-466a-97c6-b114ee0eea13\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rwmnp" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.653777 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/8eb994e6-6e8b-4d0f-84f2-92333f1571c1-audit\") pod \"apiserver-76f77b778f-xdf9g\" (UID: \"8eb994e6-6e8b-4d0f-84f2-92333f1571c1\") " pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.653933 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-pxzzf"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.654873 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c2fe40bf-a690-426b-af71-d9e8af02202c-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-v7s8c\" (UID: \"c2fe40bf-a690-426b-af71-d9e8af02202c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-v7s8c" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.655371 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-2ph78"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.656193 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/da348cdf-f9c8-4e7c-b462-7d4979dada11-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-w8nfn\" (UID: \"da348cdf-f9c8-4e7c-b462-7d4979dada11\") " pod="openshift-controller-manager/controller-manager-879f6c89f-w8nfn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.656569 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-fsfrp"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.657305 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-fsfrp" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.657605 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c2fe40bf-a690-426b-af71-d9e8af02202c-serving-cert\") pod \"authentication-operator-69f744f599-v7s8c\" (UID: \"c2fe40bf-a690-426b-af71-d9e8af02202c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-v7s8c" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.657939 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hb7bm"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.659372 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-r7vng"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.660902 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-tlnpm"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.669553 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xhnwk"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.669636 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.672906 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-vlrzr"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.674415 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-xlh9r"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.674967 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-plj59"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.676247 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-9cr2f"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.677842 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-t5rkn"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.679554 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-jhdhd"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.680800 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29522910-zlz2k"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.681764 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9n2vw"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.685399 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-dvs8h"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.685500 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.686268 4791 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7swxh"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.687334 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-2mrz5"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.688386 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-fsfrp"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.689682 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-6xhp5"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.690238 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-6xhp5" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.690679 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-l4ktw"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.691990 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-l4ktw"] Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.692119 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-l4ktw" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.705171 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.719370 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/12d0d3a0-841c-4a06-aad2-c52c33800392-auth-proxy-config\") pod \"machine-approver-56656f9798-8tsx6\" (UID: \"12d0d3a0-841c-4a06-aad2-c52c33800392\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-8tsx6" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.719573 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/73644233-2e9a-458d-8513-f56dddce1c55-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-fpmgd\" (UID: \"73644233-2e9a-458d-8513-f56dddce1c55\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fpmgd" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.719703 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hcb9j\" (UniqueName: \"kubernetes.io/projected/8ef3cbf2-9429-44d5-a7bd-fa1ef31707cc-kube-api-access-hcb9j\") pod \"catalog-operator-68c6474976-t5rkn\" (UID: \"8ef3cbf2-9429-44d5-a7bd-fa1ef31707cc\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-t5rkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.719827 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/302ddff9-9052-4d4f-9b4b-b0c0b1b3a638-config\") pod \"etcd-operator-b45778765-2ph78\" (UID: \"302ddff9-9052-4d4f-9b4b-b0c0b1b3a638\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2ph78" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.719912 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/e7603b81-f0fd-4f84-980d-672efe28d72f-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-k8nrp\" (UID: \"e7603b81-f0fd-4f84-980d-672efe28d72f\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-k8nrp" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.720039 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/12d0d3a0-841c-4a06-aad2-c52c33800392-auth-proxy-config\") pod \"machine-approver-56656f9798-8tsx6\" (UID: \"12d0d3a0-841c-4a06-aad2-c52c33800392\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-8tsx6" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.720589 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/73644233-2e9a-458d-8513-f56dddce1c55-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-fpmgd\" (UID: \"73644233-2e9a-458d-8513-f56dddce1c55\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fpmgd" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.720838 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/929ec2d3-2a0e-4031-9a2a-08b9744e2b40-serving-cert\") pod \"openshift-config-operator-7777fb866f-dvs8h\" (UID: \"929ec2d3-2a0e-4031-9a2a-08b9744e2b40\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-dvs8h" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.721457 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/523f9b10-74b8-45bb-b400-054b069286dc-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-t5878\" (UID: \"523f9b10-74b8-45bb-b400-054b069286dc\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-t5878" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.721583 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/86198f6e-9eb6-4058-bf71-543f8283d54c-metrics-tls\") pod \"ingress-operator-5b745b69d9-xlh9r\" (UID: \"86198f6e-9eb6-4058-bf71-543f8283d54c\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xlh9r" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.721734 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d8671eab-3f50-466b-b6aa-f20c971e1b7e-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-7swxh\" (UID: \"d8671eab-3f50-466b-b6aa-f20c971e1b7e\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7swxh" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.721862 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7kgdr\" (UniqueName: \"kubernetes.io/projected/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-kube-api-access-7kgdr\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.721967 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qmx59\" (UniqueName: \"kubernetes.io/projected/929ec2d3-2a0e-4031-9a2a-08b9744e2b40-kube-api-access-qmx59\") 
pod \"openshift-config-operator-7777fb866f-dvs8h\" (UID: \"929ec2d3-2a0e-4031-9a2a-08b9744e2b40\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-dvs8h" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.722066 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/e1e9abd7-81f2-423d-8a79-c4102461680d-oauth-serving-cert\") pod \"console-f9d7485db-rxhdk\" (UID: \"e1e9abd7-81f2-423d-8a79-c4102461680d\") " pod="openshift-console/console-f9d7485db-rxhdk" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.722140 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c6450a7c-d805-4e80-aee0-ae49750d8dc7-cert\") pod \"ingress-canary-fsfrp\" (UID: \"c6450a7c-d805-4e80-aee0-ae49750d8dc7\") " pod="openshift-ingress-canary/ingress-canary-fsfrp" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.722282 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/0c4d0046-4e3a-45c8-9d0c-185714e546d0-profile-collector-cert\") pod \"olm-operator-6b444d44fb-dnxrh\" (UID: \"0c4d0046-4e3a-45c8-9d0c-185714e546d0\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dnxrh" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.722465 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/302ddff9-9052-4d4f-9b4b-b0c0b1b3a638-etcd-client\") pod \"etcd-operator-b45778765-2ph78\" (UID: \"302ddff9-9052-4d4f-9b4b-b0c0b1b3a638\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2ph78" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.722825 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/7fa9bca6-1980-4a13-9c30-5d693c51b72c-tmpfs\") pod \"packageserver-d55dfcdfc-82mpq\" (UID: \"7fa9bca6-1980-4a13-9c30-5d693c51b72c\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-82mpq" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.722940 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xq7q4\" (UniqueName: \"kubernetes.io/projected/cf6cb538-0c36-4140-a49f-2fcf19d46169-kube-api-access-xq7q4\") pod \"downloads-7954f5f757-qj494\" (UID: \"cf6cb538-0c36-4140-a49f-2fcf19d46169\") " pod="openshift-console/downloads-7954f5f757-qj494" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.723046 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.723128 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/a4f1858f-9974-4098-a264-b981c587623b-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-82c2d\" (UID: \"a4f1858f-9974-4098-a264-b981c587623b\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-82c2d" 
Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.723455 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dhj2k\" (UniqueName: \"kubernetes.io/projected/a4f1858f-9974-4098-a264-b981c587623b-kube-api-access-dhj2k\") pod \"control-plane-machine-set-operator-78cbb6b69f-82c2d\" (UID: \"a4f1858f-9974-4098-a264-b981c587623b\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-82c2d" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.723539 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/e1e9abd7-81f2-423d-8a79-c4102461680d-console-config\") pod \"console-f9d7485db-rxhdk\" (UID: \"e1e9abd7-81f2-423d-8a79-c4102461680d\") " pod="openshift-console/console-f9d7485db-rxhdk" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.723276 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/7fa9bca6-1980-4a13-9c30-5d693c51b72c-tmpfs\") pod \"packageserver-d55dfcdfc-82mpq\" (UID: \"7fa9bca6-1980-4a13-9c30-5d693c51b72c\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-82mpq" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.722997 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/e1e9abd7-81f2-423d-8a79-c4102461680d-oauth-serving-cert\") pod \"console-f9d7485db-rxhdk\" (UID: \"e1e9abd7-81f2-423d-8a79-c4102461680d\") " pod="openshift-console/console-f9d7485db-rxhdk" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.724339 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/929ec2d3-2a0e-4031-9a2a-08b9744e2b40-serving-cert\") pod \"openshift-config-operator-7777fb866f-dvs8h\" (UID: \"929ec2d3-2a0e-4031-9a2a-08b9744e2b40\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-dvs8h" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.724338 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.724518 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/e1e9abd7-81f2-423d-8a79-c4102461680d-console-config\") pod \"console-f9d7485db-rxhdk\" (UID: \"e1e9abd7-81f2-423d-8a79-c4102461680d\") " pod="openshift-console/console-f9d7485db-rxhdk" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.723638 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ksj7w\" (UniqueName: \"kubernetes.io/projected/e1e9abd7-81f2-423d-8a79-c4102461680d-kube-api-access-ksj7w\") pod \"console-f9d7485db-rxhdk\" (UID: \"e1e9abd7-81f2-423d-8a79-c4102461680d\") " pod="openshift-console/console-f9d7485db-rxhdk" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.724915 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/85eab95a-ee70-497d-a22d-1cf047cb906a-config\") pod 
\"kube-apiserver-operator-766d6c64bb-hb7bm\" (UID: \"85eab95a-ee70-497d-a22d-1cf047cb906a\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hb7bm" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.725129 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c1261ad9-b566-419e-ad9a-e7b361edd24a-serving-cert\") pod \"route-controller-manager-6576b87f9c-2m72p\" (UID: \"c1261ad9-b566-419e-ad9a-e7b361edd24a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2m72p" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.725469 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.728426 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.727702 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c1261ad9-b566-419e-ad9a-e7b361edd24a-serving-cert\") pod \"route-controller-manager-6576b87f9c-2m72p\" (UID: \"c1261ad9-b566-419e-ad9a-e7b361edd24a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2m72p" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.728461 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ec919db-352a-41eb-9564-dfc8add29680-config\") pod \"kube-controller-manager-operator-78b949d7b-9n2vw\" (UID: \"5ec919db-352a-41eb-9564-dfc8add29680\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9n2vw" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.728482 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2mpgl\" (UniqueName: \"kubernetes.io/projected/5826b603-6616-49b5-a4d0-9dd63e715c9e-kube-api-access-2mpgl\") pod \"router-default-5444994796-tk98h\" (UID: \"5826b603-6616-49b5-a4d0-9dd63e715c9e\") " pod="openshift-ingress/router-default-5444994796-tk98h" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.728503 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.728685 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6edc9cce-f90d-4bff-bc33-43ff4051944f-config\") pod \"service-ca-operator-777779d784-jhdhd\" (UID: \"6edc9cce-f90d-4bff-bc33-43ff4051944f\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jhdhd" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.728732 4791 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5xdfq\" (UniqueName: \"kubernetes.io/projected/d8671eab-3f50-466b-b6aa-f20c971e1b7e-kube-api-access-5xdfq\") pod \"cluster-image-registry-operator-dc59b4c8b-7swxh\" (UID: \"d8671eab-3f50-466b-b6aa-f20c971e1b7e\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7swxh" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.728759 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.728778 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/d06d8fca-e20d-4b6b-9466-8c612ec8cc5f-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-plj59\" (UID: \"d06d8fca-e20d-4b6b-9466-8c612ec8cc5f\") " pod="openshift-marketplace/marketplace-operator-79b997595-plj59" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.728799 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.728831 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jm846\" (UniqueName: \"kubernetes.io/projected/523f9b10-74b8-45bb-b400-054b069286dc-kube-api-access-jm846\") pod \"multus-admission-controller-857f4d67dd-t5878\" (UID: \"523f9b10-74b8-45bb-b400-054b069286dc\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-t5878" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.728856 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8ef3cbf2-9429-44d5-a7bd-fa1ef31707cc-srv-cert\") pod \"catalog-operator-68c6474976-t5rkn\" (UID: \"8ef3cbf2-9429-44d5-a7bd-fa1ef31707cc\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-t5rkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.728886 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6edc9cce-f90d-4bff-bc33-43ff4051944f-serving-cert\") pod \"service-ca-operator-777779d784-jhdhd\" (UID: \"6edc9cce-f90d-4bff-bc33-43ff4051944f\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jhdhd" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.728903 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.728924 4791 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/6f0f9444-6d63-4e2b-b63e-eb14589c5fdb-signing-cabundle\") pod \"service-ca-9c57cc56f-pxzzf\" (UID: \"6f0f9444-6d63-4e2b-b63e-eb14589c5fdb\") " pod="openshift-service-ca/service-ca-9c57cc56f-pxzzf" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.728950 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8pl5c\" (UniqueName: \"kubernetes.io/projected/302ddff9-9052-4d4f-9b4b-b0c0b1b3a638-kube-api-access-8pl5c\") pod \"etcd-operator-b45778765-2ph78\" (UID: \"302ddff9-9052-4d4f-9b4b-b0c0b1b3a638\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2ph78" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.728968 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.728997 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/302ddff9-9052-4d4f-9b4b-b0c0b1b3a638-etcd-ca\") pod \"etcd-operator-b45778765-2ph78\" (UID: \"302ddff9-9052-4d4f-9b4b-b0c0b1b3a638\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2ph78" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.729016 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-audit-dir\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.729032 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a7563a68-6d05-4beb-8d19-ab941578e67e-auth-proxy-config\") pod \"machine-config-operator-74547568cd-2tcll\" (UID: \"a7563a68-6d05-4beb-8d19-ab941578e67e\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2tcll" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.729052 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b06fac8f-e399-462f-9665-35781318a69d-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-xhnwk\" (UID: \"b06fac8f-e399-462f-9665-35781318a69d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xhnwk" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.729055 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.729069 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" 
(UniqueName: \"kubernetes.io/projected/5ec919db-352a-41eb-9564-dfc8add29680-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-9n2vw\" (UID: \"5ec919db-352a-41eb-9564-dfc8add29680\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9n2vw" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.729095 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/929ec2d3-2a0e-4031-9a2a-08b9744e2b40-available-featuregates\") pod \"openshift-config-operator-7777fb866f-dvs8h\" (UID: \"929ec2d3-2a0e-4031-9a2a-08b9744e2b40\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-dvs8h" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.729123 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/e1e9abd7-81f2-423d-8a79-c4102461680d-console-oauth-config\") pod \"console-f9d7485db-rxhdk\" (UID: \"e1e9abd7-81f2-423d-8a79-c4102461680d\") " pod="openshift-console/console-f9d7485db-rxhdk" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.729144 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tcjwn\" (UniqueName: \"kubernetes.io/projected/e0149f47-a655-434f-982e-b8872ea180c4-kube-api-access-tcjwn\") pod \"migrator-59844c95c7-rfkpk\" (UID: \"e0149f47-a655-434f-982e-b8872ea180c4\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-rfkpk" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.729211 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/58de0ec6-b12b-4dcb-b0cb-95f2f786467b-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-tlnpm\" (UID: \"58de0ec6-b12b-4dcb-b0cb-95f2f786467b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-tlnpm" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.729259 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c1261ad9-b566-419e-ad9a-e7b361edd24a-config\") pod \"route-controller-manager-6576b87f9c-2m72p\" (UID: \"c1261ad9-b566-419e-ad9a-e7b361edd24a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2m72p" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.729279 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/e1e9abd7-81f2-423d-8a79-c4102461680d-service-ca\") pod \"console-f9d7485db-rxhdk\" (UID: \"e1e9abd7-81f2-423d-8a79-c4102461680d\") " pod="openshift-console/console-f9d7485db-rxhdk" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.729300 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/40569d9f-89af-4f68-855d-88d881e71de5-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-9cr2f\" (UID: \"40569d9f-89af-4f68-855d-88d881e71de5\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-9cr2f" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.729316 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: 
\"kubernetes.io/secret/5826b603-6616-49b5-a4d0-9dd63e715c9e-stats-auth\") pod \"router-default-5444994796-tk98h\" (UID: \"5826b603-6616-49b5-a4d0-9dd63e715c9e\") " pod="openshift-ingress/router-default-5444994796-tk98h" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.729334 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0084382-ff2c-4c0d-8460-b0694790a78b-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-7pmtt\" (UID: \"a0084382-ff2c-4c0d-8460-b0694790a78b\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7pmtt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.729351 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/58de0ec6-b12b-4dcb-b0cb-95f2f786467b-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-tlnpm\" (UID: \"58de0ec6-b12b-4dcb-b0cb-95f2f786467b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-tlnpm" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.730199 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c1261ad9-b566-419e-ad9a-e7b361edd24a-config\") pod \"route-controller-manager-6576b87f9c-2m72p\" (UID: \"c1261ad9-b566-419e-ad9a-e7b361edd24a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2m72p" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.730443 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-audit-dir\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.731022 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.731183 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.731341 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/929ec2d3-2a0e-4031-9a2a-08b9744e2b40-available-featuregates\") pod \"openshift-config-operator-7777fb866f-dvs8h\" (UID: \"929ec2d3-2a0e-4031-9a2a-08b9744e2b40\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-dvs8h" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.731451 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b2dw4\" (UniqueName: \"kubernetes.io/projected/7fa9bca6-1980-4a13-9c30-5d693c51b72c-kube-api-access-b2dw4\") pod 
\"packageserver-d55dfcdfc-82mpq\" (UID: \"7fa9bca6-1980-4a13-9c30-5d693c51b72c\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-82mpq" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.731474 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p42rv\" (UniqueName: \"kubernetes.io/projected/40569d9f-89af-4f68-855d-88d881e71de5-kube-api-access-p42rv\") pod \"kube-storage-version-migrator-operator-b67b599dd-9cr2f\" (UID: \"40569d9f-89af-4f68-855d-88d881e71de5\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-9cr2f" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.731510 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b06fac8f-e399-462f-9665-35781318a69d-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-xhnwk\" (UID: \"b06fac8f-e399-462f-9665-35781318a69d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xhnwk" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.732068 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/12d0d3a0-841c-4a06-aad2-c52c33800392-machine-approver-tls\") pod \"machine-approver-56656f9798-8tsx6\" (UID: \"12d0d3a0-841c-4a06-aad2-c52c33800392\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-8tsx6" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.732135 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gq7cw\" (UniqueName: \"kubernetes.io/projected/e7603b81-f0fd-4f84-980d-672efe28d72f-kube-api-access-gq7cw\") pod \"package-server-manager-789f6589d5-k8nrp\" (UID: \"e7603b81-f0fd-4f84-980d-672efe28d72f\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-k8nrp" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.732202 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58de0ec6-b12b-4dcb-b0cb-95f2f786467b-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-tlnpm\" (UID: \"58de0ec6-b12b-4dcb-b0cb-95f2f786467b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-tlnpm" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.732221 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/302ddff9-9052-4d4f-9b4b-b0c0b1b3a638-etcd-service-ca\") pod \"etcd-operator-b45778765-2ph78\" (UID: \"302ddff9-9052-4d4f-9b4b-b0c0b1b3a638\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2ph78" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.732249 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/73644233-2e9a-458d-8513-f56dddce1c55-proxy-tls\") pod \"machine-config-controller-84d6567774-fpmgd\" (UID: \"73644233-2e9a-458d-8513-f56dddce1c55\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fpmgd" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.732287 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: 
\"kubernetes.io/secret/5826b603-6616-49b5-a4d0-9dd63e715c9e-default-certificate\") pod \"router-default-5444994796-tk98h\" (UID: \"5826b603-6616-49b5-a4d0-9dd63e715c9e\") " pod="openshift-ingress/router-default-5444994796-tk98h" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.732319 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c1261ad9-b566-419e-ad9a-e7b361edd24a-client-ca\") pod \"route-controller-manager-6576b87f9c-2m72p\" (UID: \"c1261ad9-b566-419e-ad9a-e7b361edd24a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2m72p" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.732337 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a7563a68-6d05-4beb-8d19-ab941578e67e-proxy-tls\") pod \"machine-config-operator-74547568cd-2tcll\" (UID: \"a7563a68-6d05-4beb-8d19-ab941578e67e\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2tcll" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.732525 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b06fac8f-e399-462f-9665-35781318a69d-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-xhnwk\" (UID: \"b06fac8f-e399-462f-9665-35781318a69d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xhnwk" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.732704 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a7563a68-6d05-4beb-8d19-ab941578e67e-auth-proxy-config\") pod \"machine-config-operator-74547568cd-2tcll\" (UID: \"a7563a68-6d05-4beb-8d19-ab941578e67e\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2tcll" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.732759 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5ec919db-352a-41eb-9564-dfc8add29680-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-9n2vw\" (UID: \"5ec919db-352a-41eb-9564-dfc8add29680\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9n2vw" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.732779 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/85eab95a-ee70-497d-a22d-1cf047cb906a-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-hb7bm\" (UID: \"85eab95a-ee70-497d-a22d-1cf047cb906a\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hb7bm" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.732805 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x7qb5\" (UniqueName: \"kubernetes.io/projected/12d0d3a0-841c-4a06-aad2-c52c33800392-kube-api-access-x7qb5\") pod \"machine-approver-56656f9798-8tsx6\" (UID: \"12d0d3a0-841c-4a06-aad2-c52c33800392\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-8tsx6" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.732854 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: 
\"kubernetes.io/configmap/e1e9abd7-81f2-423d-8a79-c4102461680d-service-ca\") pod \"console-f9d7485db-rxhdk\" (UID: \"e1e9abd7-81f2-423d-8a79-c4102461680d\") " pod="openshift-console/console-f9d7485db-rxhdk" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.732915 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/12d0d3a0-841c-4a06-aad2-c52c33800392-config\") pod \"machine-approver-56656f9798-8tsx6\" (UID: \"12d0d3a0-841c-4a06-aad2-c52c33800392\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-8tsx6" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.732935 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m6wqn\" (UniqueName: \"kubernetes.io/projected/0c4d0046-4e3a-45c8-9d0c-185714e546d0-kube-api-access-m6wqn\") pod \"olm-operator-6b444d44fb-dnxrh\" (UID: \"0c4d0046-4e3a-45c8-9d0c-185714e546d0\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dnxrh" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.732979 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/a7563a68-6d05-4beb-8d19-ab941578e67e-images\") pod \"machine-config-operator-74547568cd-2tcll\" (UID: \"a7563a68-6d05-4beb-8d19-ab941578e67e\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2tcll" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.733029 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.733049 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5826b603-6616-49b5-a4d0-9dd63e715c9e-service-ca-bundle\") pod \"router-default-5444994796-tk98h\" (UID: \"5826b603-6616-49b5-a4d0-9dd63e715c9e\") " pod="openshift-ingress/router-default-5444994796-tk98h" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.733123 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/6f0f9444-6d63-4e2b-b63e-eb14589c5fdb-signing-key\") pod \"service-ca-9c57cc56f-pxzzf\" (UID: \"6f0f9444-6d63-4e2b-b63e-eb14589c5fdb\") " pod="openshift-service-ca/service-ca-9c57cc56f-pxzzf" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.733389 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c1261ad9-b566-419e-ad9a-e7b361edd24a-client-ca\") pod \"route-controller-manager-6576b87f9c-2m72p\" (UID: \"c1261ad9-b566-419e-ad9a-e7b361edd24a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2m72p" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.733659 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/12d0d3a0-841c-4a06-aad2-c52c33800392-config\") pod \"machine-approver-56656f9798-8tsx6\" (UID: \"12d0d3a0-841c-4a06-aad2-c52c33800392\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-8tsx6" 
Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.733728 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pxxj8\" (UniqueName: \"kubernetes.io/projected/668a019f-7e1c-4aca-a07c-3f22308a66c2-kube-api-access-pxxj8\") pod \"openshift-apiserver-operator-796bbdcf4f-4zz5m\" (UID: \"668a019f-7e1c-4aca-a07c-3f22308a66c2\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4zz5m" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.733748 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7fa9bca6-1980-4a13-9c30-5d693c51b72c-webhook-cert\") pod \"packageserver-d55dfcdfc-82mpq\" (UID: \"7fa9bca6-1980-4a13-9c30-5d693c51b72c\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-82mpq" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.733781 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/d8671eab-3f50-466b-b6aa-f20c971e1b7e-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-7swxh\" (UID: \"d8671eab-3f50-466b-b6aa-f20c971e1b7e\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7swxh" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.733804 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/86198f6e-9eb6-4058-bf71-543f8283d54c-bound-sa-token\") pod \"ingress-operator-5b745b69d9-xlh9r\" (UID: \"86198f6e-9eb6-4058-bf71-543f8283d54c\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xlh9r" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.733840 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.733847 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/a7563a68-6d05-4beb-8d19-ab941578e67e-images\") pod \"machine-config-operator-74547568cd-2tcll\" (UID: \"a7563a68-6d05-4beb-8d19-ab941578e67e\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2tcll" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.733858 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5rcr2\" (UniqueName: \"kubernetes.io/projected/a7563a68-6d05-4beb-8d19-ab941578e67e-kube-api-access-5rcr2\") pod \"machine-config-operator-74547568cd-2tcll\" (UID: \"a7563a68-6d05-4beb-8d19-ab941578e67e\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2tcll" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.733881 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d06d8fca-e20d-4b6b-9466-8c612ec8cc5f-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-plj59\" (UID: \"d06d8fca-e20d-4b6b-9466-8c612ec8cc5f\") " pod="openshift-marketplace/marketplace-operator-79b997595-plj59" Feb 18 00:36:48 crc 
kubenswrapper[4791]: I0218 00:36:48.733919 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/83ea23eb-c756-4262-a2d7-218bc3fb25ac-config-volume\") pod \"dns-default-vlrzr\" (UID: \"83ea23eb-c756-4262-a2d7-218bc3fb25ac\") " pod="openshift-dns/dns-default-vlrzr" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.733939 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/0c4d0046-4e3a-45c8-9d0c-185714e546d0-srv-cert\") pod \"olm-operator-6b444d44fb-dnxrh\" (UID: \"0c4d0046-4e3a-45c8-9d0c-185714e546d0\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dnxrh" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.734640 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/e1e9abd7-81f2-423d-8a79-c4102461680d-console-serving-cert\") pod \"console-f9d7485db-rxhdk\" (UID: \"e1e9abd7-81f2-423d-8a79-c4102461680d\") " pod="openshift-console/console-f9d7485db-rxhdk" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.734703 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fnkl7\" (UniqueName: \"kubernetes.io/projected/6edc9cce-f90d-4bff-bc33-43ff4051944f-kube-api-access-fnkl7\") pod \"service-ca-operator-777779d784-jhdhd\" (UID: \"6edc9cce-f90d-4bff-bc33-43ff4051944f\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jhdhd" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.734758 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7fa9bca6-1980-4a13-9c30-5d693c51b72c-apiservice-cert\") pod \"packageserver-d55dfcdfc-82mpq\" (UID: \"7fa9bca6-1980-4a13-9c30-5d693c51b72c\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-82mpq" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.734782 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k7ttg\" (UniqueName: \"kubernetes.io/projected/c6450a7c-d805-4e80-aee0-ae49750d8dc7-kube-api-access-k7ttg\") pod \"ingress-canary-fsfrp\" (UID: \"c6450a7c-d805-4e80-aee0-ae49750d8dc7\") " pod="openshift-ingress-canary/ingress-canary-fsfrp" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.734801 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d8671eab-3f50-466b-b6aa-f20c971e1b7e-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-7swxh\" (UID: \"d8671eab-3f50-466b-b6aa-f20c971e1b7e\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7swxh" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.734842 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cxk9n\" (UniqueName: \"kubernetes.io/projected/d06d8fca-e20d-4b6b-9466-8c612ec8cc5f-kube-api-access-cxk9n\") pod \"marketplace-operator-79b997595-plj59\" (UID: \"d06d8fca-e20d-4b6b-9466-8c612ec8cc5f\") " pod="openshift-marketplace/marketplace-operator-79b997595-plj59" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.734861 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: 
\"kubernetes.io/secret/5826b603-6616-49b5-a4d0-9dd63e715c9e-metrics-certs\") pod \"router-default-5444994796-tk98h\" (UID: \"5826b603-6616-49b5-a4d0-9dd63e715c9e\") " pod="openshift-ingress/router-default-5444994796-tk98h" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.734881 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/302ddff9-9052-4d4f-9b4b-b0c0b1b3a638-serving-cert\") pod \"etcd-operator-b45778765-2ph78\" (UID: \"302ddff9-9052-4d4f-9b4b-b0c0b1b3a638\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2ph78" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.734924 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/668a019f-7e1c-4aca-a07c-3f22308a66c2-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-4zz5m\" (UID: \"668a019f-7e1c-4aca-a07c-3f22308a66c2\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4zz5m" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.734941 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.734959 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/86198f6e-9eb6-4058-bf71-543f8283d54c-trusted-ca\") pod \"ingress-operator-5b745b69d9-xlh9r\" (UID: \"86198f6e-9eb6-4058-bf71-543f8283d54c\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xlh9r" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.734998 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9xm7\" (UniqueName: \"kubernetes.io/projected/b06fac8f-e399-462f-9665-35781318a69d-kube-api-access-g9xm7\") pod \"openshift-controller-manager-operator-756b6f6bc6-xhnwk\" (UID: \"b06fac8f-e399-462f-9665-35781318a69d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xhnwk" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.735020 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8tqvq\" (UniqueName: \"kubernetes.io/projected/c1261ad9-b566-419e-ad9a-e7b361edd24a-kube-api-access-8tqvq\") pod \"route-controller-manager-6576b87f9c-2m72p\" (UID: \"c1261ad9-b566-419e-ad9a-e7b361edd24a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2m72p" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.735038 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4b4w\" (UniqueName: \"kubernetes.io/projected/86198f6e-9eb6-4058-bf71-543f8283d54c-kube-api-access-q4b4w\") pod \"ingress-operator-5b745b69d9-xlh9r\" (UID: \"86198f6e-9eb6-4058-bf71-543f8283d54c\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xlh9r" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.735054 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bv26x\" (UniqueName: 
\"kubernetes.io/projected/73644233-2e9a-458d-8513-f56dddce1c55-kube-api-access-bv26x\") pod \"machine-config-controller-84d6567774-fpmgd\" (UID: \"73644233-2e9a-458d-8513-f56dddce1c55\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fpmgd" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.735094 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-audit-policies\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.735111 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e1e9abd7-81f2-423d-8a79-c4102461680d-trusted-ca-bundle\") pod \"console-f9d7485db-rxhdk\" (UID: \"e1e9abd7-81f2-423d-8a79-c4102461680d\") " pod="openshift-console/console-f9d7485db-rxhdk" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.735132 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8ef3cbf2-9429-44d5-a7bd-fa1ef31707cc-profile-collector-cert\") pod \"catalog-operator-68c6474976-t5rkn\" (UID: \"8ef3cbf2-9429-44d5-a7bd-fa1ef31707cc\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-t5rkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.735185 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/668a019f-7e1c-4aca-a07c-3f22308a66c2-config\") pod \"openshift-apiserver-operator-796bbdcf4f-4zz5m\" (UID: \"668a019f-7e1c-4aca-a07c-3f22308a66c2\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4zz5m" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.735204 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/83ea23eb-c756-4262-a2d7-218bc3fb25ac-metrics-tls\") pod \"dns-default-vlrzr\" (UID: \"83ea23eb-c756-4262-a2d7-218bc3fb25ac\") " pod="openshift-dns/dns-default-vlrzr" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.735239 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nkzjh\" (UniqueName: \"kubernetes.io/projected/83ea23eb-c756-4262-a2d7-218bc3fb25ac-kube-api-access-nkzjh\") pod \"dns-default-vlrzr\" (UID: \"83ea23eb-c756-4262-a2d7-218bc3fb25ac\") " pod="openshift-dns/dns-default-vlrzr" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.735271 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sl58g\" (UniqueName: \"kubernetes.io/projected/6f0f9444-6d63-4e2b-b63e-eb14589c5fdb-kube-api-access-sl58g\") pod \"service-ca-9c57cc56f-pxzzf\" (UID: \"6f0f9444-6d63-4e2b-b63e-eb14589c5fdb\") " pod="openshift-service-ca/service-ca-9c57cc56f-pxzzf" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.735289 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.735313 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/85eab95a-ee70-497d-a22d-1cf047cb906a-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-hb7bm\" (UID: \"85eab95a-ee70-497d-a22d-1cf047cb906a\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hb7bm" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.735335 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40569d9f-89af-4f68-855d-88d881e71de5-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-9cr2f\" (UID: \"40569d9f-89af-4f68-855d-88d881e71de5\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-9cr2f" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.735353 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gw4zr\" (UniqueName: \"kubernetes.io/projected/a0084382-ff2c-4c0d-8460-b0694790a78b-kube-api-access-gw4zr\") pod \"cluster-samples-operator-665b6dd947-7pmtt\" (UID: \"a0084382-ff2c-4c0d-8460-b0694790a78b\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7pmtt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.736427 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7fa9bca6-1980-4a13-9c30-5d693c51b72c-webhook-cert\") pod \"packageserver-d55dfcdfc-82mpq\" (UID: \"7fa9bca6-1980-4a13-9c30-5d693c51b72c\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-82mpq" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.737676 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.738718 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/12d0d3a0-841c-4a06-aad2-c52c33800392-machine-approver-tls\") pod \"machine-approver-56656f9798-8tsx6\" (UID: \"12d0d3a0-841c-4a06-aad2-c52c33800392\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-8tsx6" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.738946 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.738960 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" 
Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.738978 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.739224 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.739370 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.739522 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e1e9abd7-81f2-423d-8a79-c4102461680d-trusted-ca-bundle\") pod \"console-f9d7485db-rxhdk\" (UID: \"e1e9abd7-81f2-423d-8a79-c4102461680d\") " pod="openshift-console/console-f9d7485db-rxhdk" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.739582 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-audit-policies\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.740289 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.741014 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a7563a68-6d05-4beb-8d19-ab941578e67e-proxy-tls\") pod \"machine-config-operator-74547568cd-2tcll\" (UID: \"a7563a68-6d05-4beb-8d19-ab941578e67e\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2tcll" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.741229 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/e1e9abd7-81f2-423d-8a79-c4102461680d-console-serving-cert\") pod \"console-f9d7485db-rxhdk\" (UID: \"e1e9abd7-81f2-423d-8a79-c4102461680d\") " pod="openshift-console/console-f9d7485db-rxhdk" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.741337 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/b06fac8f-e399-462f-9665-35781318a69d-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-xhnwk\" (UID: \"b06fac8f-e399-462f-9665-35781318a69d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xhnwk" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.741923 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0084382-ff2c-4c0d-8460-b0694790a78b-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-7pmtt\" (UID: \"a0084382-ff2c-4c0d-8460-b0694790a78b\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7pmtt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.742035 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/668a019f-7e1c-4aca-a07c-3f22308a66c2-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-4zz5m\" (UID: \"668a019f-7e1c-4aca-a07c-3f22308a66c2\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4zz5m" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.742201 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/668a019f-7e1c-4aca-a07c-3f22308a66c2-config\") pod \"openshift-apiserver-operator-796bbdcf4f-4zz5m\" (UID: \"668a019f-7e1c-4aca-a07c-3f22308a66c2\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4zz5m" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.742242 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/e1e9abd7-81f2-423d-8a79-c4102461680d-console-oauth-config\") pod \"console-f9d7485db-rxhdk\" (UID: \"e1e9abd7-81f2-423d-8a79-c4102461680d\") " pod="openshift-console/console-f9d7485db-rxhdk" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.743897 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7fa9bca6-1980-4a13-9c30-5d693c51b72c-apiservice-cert\") pod \"packageserver-d55dfcdfc-82mpq\" (UID: \"7fa9bca6-1980-4a13-9c30-5d693c51b72c\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-82mpq" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.745540 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.765339 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.772737 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/e7603b81-f0fd-4f84-980d-672efe28d72f-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-k8nrp\" (UID: \"e7603b81-f0fd-4f84-980d-672efe28d72f\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-k8nrp" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.785443 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.796738 4791 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/a4f1858f-9974-4098-a264-b981c587623b-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-82c2d\" (UID: \"a4f1858f-9974-4098-a264-b981c587623b\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-82c2d" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.805526 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.825773 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.836007 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/85eab95a-ee70-497d-a22d-1cf047cb906a-config\") pod \"kube-apiserver-operator-766d6c64bb-hb7bm\" (UID: \"85eab95a-ee70-497d-a22d-1cf047cb906a\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hb7bm" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.836057 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ec919db-352a-41eb-9564-dfc8add29680-config\") pod \"kube-controller-manager-operator-78b949d7b-9n2vw\" (UID: \"5ec919db-352a-41eb-9564-dfc8add29680\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9n2vw" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.836084 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2mpgl\" (UniqueName: \"kubernetes.io/projected/5826b603-6616-49b5-a4d0-9dd63e715c9e-kube-api-access-2mpgl\") pod \"router-default-5444994796-tk98h\" (UID: \"5826b603-6616-49b5-a4d0-9dd63e715c9e\") " pod="openshift-ingress/router-default-5444994796-tk98h" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.836124 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5xdfq\" (UniqueName: \"kubernetes.io/projected/d8671eab-3f50-466b-b6aa-f20c971e1b7e-kube-api-access-5xdfq\") pod \"cluster-image-registry-operator-dc59b4c8b-7swxh\" (UID: \"d8671eab-3f50-466b-b6aa-f20c971e1b7e\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7swxh" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.836149 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/d06d8fca-e20d-4b6b-9466-8c612ec8cc5f-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-plj59\" (UID: \"d06d8fca-e20d-4b6b-9466-8c612ec8cc5f\") " pod="openshift-marketplace/marketplace-operator-79b997595-plj59" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.836213 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8ef3cbf2-9429-44d5-a7bd-fa1ef31707cc-srv-cert\") pod \"catalog-operator-68c6474976-t5rkn\" (UID: \"8ef3cbf2-9429-44d5-a7bd-fa1ef31707cc\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-t5rkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.836263 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: 
\"kubernetes.io/configmap/6f0f9444-6d63-4e2b-b63e-eb14589c5fdb-signing-cabundle\") pod \"service-ca-9c57cc56f-pxzzf\" (UID: \"6f0f9444-6d63-4e2b-b63e-eb14589c5fdb\") " pod="openshift-service-ca/service-ca-9c57cc56f-pxzzf" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.836294 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8pl5c\" (UniqueName: \"kubernetes.io/projected/302ddff9-9052-4d4f-9b4b-b0c0b1b3a638-kube-api-access-8pl5c\") pod \"etcd-operator-b45778765-2ph78\" (UID: \"302ddff9-9052-4d4f-9b4b-b0c0b1b3a638\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2ph78" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.836320 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/302ddff9-9052-4d4f-9b4b-b0c0b1b3a638-etcd-ca\") pod \"etcd-operator-b45778765-2ph78\" (UID: \"302ddff9-9052-4d4f-9b4b-b0c0b1b3a638\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2ph78" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.836349 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5ec919db-352a-41eb-9564-dfc8add29680-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-9n2vw\" (UID: \"5ec919db-352a-41eb-9564-dfc8add29680\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9n2vw" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.836389 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/58de0ec6-b12b-4dcb-b0cb-95f2f786467b-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-tlnpm\" (UID: \"58de0ec6-b12b-4dcb-b0cb-95f2f786467b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-tlnpm" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.836426 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/5826b603-6616-49b5-a4d0-9dd63e715c9e-stats-auth\") pod \"router-default-5444994796-tk98h\" (UID: \"5826b603-6616-49b5-a4d0-9dd63e715c9e\") " pod="openshift-ingress/router-default-5444994796-tk98h" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.836448 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/58de0ec6-b12b-4dcb-b0cb-95f2f786467b-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-tlnpm\" (UID: \"58de0ec6-b12b-4dcb-b0cb-95f2f786467b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-tlnpm" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.836491 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58de0ec6-b12b-4dcb-b0cb-95f2f786467b-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-tlnpm\" (UID: \"58de0ec6-b12b-4dcb-b0cb-95f2f786467b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-tlnpm" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.836514 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/302ddff9-9052-4d4f-9b4b-b0c0b1b3a638-etcd-service-ca\") pod \"etcd-operator-b45778765-2ph78\" (UID: 
\"302ddff9-9052-4d4f-9b4b-b0c0b1b3a638\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2ph78" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.836611 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/5826b603-6616-49b5-a4d0-9dd63e715c9e-default-certificate\") pod \"router-default-5444994796-tk98h\" (UID: \"5826b603-6616-49b5-a4d0-9dd63e715c9e\") " pod="openshift-ingress/router-default-5444994796-tk98h" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.836639 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5ec919db-352a-41eb-9564-dfc8add29680-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-9n2vw\" (UID: \"5ec919db-352a-41eb-9564-dfc8add29680\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9n2vw" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.836661 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/85eab95a-ee70-497d-a22d-1cf047cb906a-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-hb7bm\" (UID: \"85eab95a-ee70-497d-a22d-1cf047cb906a\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hb7bm" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.836698 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5826b603-6616-49b5-a4d0-9dd63e715c9e-service-ca-bundle\") pod \"router-default-5444994796-tk98h\" (UID: \"5826b603-6616-49b5-a4d0-9dd63e715c9e\") " pod="openshift-ingress/router-default-5444994796-tk98h" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.836722 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/6f0f9444-6d63-4e2b-b63e-eb14589c5fdb-signing-key\") pod \"service-ca-9c57cc56f-pxzzf\" (UID: \"6f0f9444-6d63-4e2b-b63e-eb14589c5fdb\") " pod="openshift-service-ca/service-ca-9c57cc56f-pxzzf" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.836753 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/d8671eab-3f50-466b-b6aa-f20c971e1b7e-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-7swxh\" (UID: \"d8671eab-3f50-466b-b6aa-f20c971e1b7e\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7swxh" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.836788 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d06d8fca-e20d-4b6b-9466-8c612ec8cc5f-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-plj59\" (UID: \"d06d8fca-e20d-4b6b-9466-8c612ec8cc5f\") " pod="openshift-marketplace/marketplace-operator-79b997595-plj59" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.836810 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/83ea23eb-c756-4262-a2d7-218bc3fb25ac-config-volume\") pod \"dns-default-vlrzr\" (UID: \"83ea23eb-c756-4262-a2d7-218bc3fb25ac\") " pod="openshift-dns/dns-default-vlrzr" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.836847 4791 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k7ttg\" (UniqueName: \"kubernetes.io/projected/c6450a7c-d805-4e80-aee0-ae49750d8dc7-kube-api-access-k7ttg\") pod \"ingress-canary-fsfrp\" (UID: \"c6450a7c-d805-4e80-aee0-ae49750d8dc7\") " pod="openshift-ingress-canary/ingress-canary-fsfrp" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.836872 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d8671eab-3f50-466b-b6aa-f20c971e1b7e-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-7swxh\" (UID: \"d8671eab-3f50-466b-b6aa-f20c971e1b7e\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7swxh" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.836896 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cxk9n\" (UniqueName: \"kubernetes.io/projected/d06d8fca-e20d-4b6b-9466-8c612ec8cc5f-kube-api-access-cxk9n\") pod \"marketplace-operator-79b997595-plj59\" (UID: \"d06d8fca-e20d-4b6b-9466-8c612ec8cc5f\") " pod="openshift-marketplace/marketplace-operator-79b997595-plj59" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.836915 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5826b603-6616-49b5-a4d0-9dd63e715c9e-metrics-certs\") pod \"router-default-5444994796-tk98h\" (UID: \"5826b603-6616-49b5-a4d0-9dd63e715c9e\") " pod="openshift-ingress/router-default-5444994796-tk98h" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.836937 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/302ddff9-9052-4d4f-9b4b-b0c0b1b3a638-serving-cert\") pod \"etcd-operator-b45778765-2ph78\" (UID: \"302ddff9-9052-4d4f-9b4b-b0c0b1b3a638\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2ph78" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.836994 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8ef3cbf2-9429-44d5-a7bd-fa1ef31707cc-profile-collector-cert\") pod \"catalog-operator-68c6474976-t5rkn\" (UID: \"8ef3cbf2-9429-44d5-a7bd-fa1ef31707cc\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-t5rkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.837016 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/83ea23eb-c756-4262-a2d7-218bc3fb25ac-metrics-tls\") pod \"dns-default-vlrzr\" (UID: \"83ea23eb-c756-4262-a2d7-218bc3fb25ac\") " pod="openshift-dns/dns-default-vlrzr" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.837036 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nkzjh\" (UniqueName: \"kubernetes.io/projected/83ea23eb-c756-4262-a2d7-218bc3fb25ac-kube-api-access-nkzjh\") pod \"dns-default-vlrzr\" (UID: \"83ea23eb-c756-4262-a2d7-218bc3fb25ac\") " pod="openshift-dns/dns-default-vlrzr" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.837059 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sl58g\" (UniqueName: \"kubernetes.io/projected/6f0f9444-6d63-4e2b-b63e-eb14589c5fdb-kube-api-access-sl58g\") pod \"service-ca-9c57cc56f-pxzzf\" (UID: \"6f0f9444-6d63-4e2b-b63e-eb14589c5fdb\") " 
pod="openshift-service-ca/service-ca-9c57cc56f-pxzzf" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.837090 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/85eab95a-ee70-497d-a22d-1cf047cb906a-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-hb7bm\" (UID: \"85eab95a-ee70-497d-a22d-1cf047cb906a\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hb7bm" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.837132 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hcb9j\" (UniqueName: \"kubernetes.io/projected/8ef3cbf2-9429-44d5-a7bd-fa1ef31707cc-kube-api-access-hcb9j\") pod \"catalog-operator-68c6474976-t5rkn\" (UID: \"8ef3cbf2-9429-44d5-a7bd-fa1ef31707cc\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-t5rkn" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.837173 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/302ddff9-9052-4d4f-9b4b-b0c0b1b3a638-config\") pod \"etcd-operator-b45778765-2ph78\" (UID: \"302ddff9-9052-4d4f-9b4b-b0c0b1b3a638\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2ph78" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.837217 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d8671eab-3f50-466b-b6aa-f20c971e1b7e-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-7swxh\" (UID: \"d8671eab-3f50-466b-b6aa-f20c971e1b7e\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7swxh" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.837250 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c6450a7c-d805-4e80-aee0-ae49750d8dc7-cert\") pod \"ingress-canary-fsfrp\" (UID: \"c6450a7c-d805-4e80-aee0-ae49750d8dc7\") " pod="openshift-ingress-canary/ingress-canary-fsfrp" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.837276 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/302ddff9-9052-4d4f-9b4b-b0c0b1b3a638-etcd-client\") pod \"etcd-operator-b45778765-2ph78\" (UID: \"302ddff9-9052-4d4f-9b4b-b0c0b1b3a638\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2ph78" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.843007 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/0c4d0046-4e3a-45c8-9d0c-185714e546d0-srv-cert\") pod \"olm-operator-6b444d44fb-dnxrh\" (UID: \"0c4d0046-4e3a-45c8-9d0c-185714e546d0\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dnxrh" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.845832 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.849397 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8ef3cbf2-9429-44d5-a7bd-fa1ef31707cc-profile-collector-cert\") pod \"catalog-operator-68c6474976-t5rkn\" (UID: \"8ef3cbf2-9429-44d5-a7bd-fa1ef31707cc\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-t5rkn" Feb 18 00:36:48 crc 
kubenswrapper[4791]: I0218 00:36:48.855128 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/0c4d0046-4e3a-45c8-9d0c-185714e546d0-profile-collector-cert\") pod \"olm-operator-6b444d44fb-dnxrh\" (UID: \"0c4d0046-4e3a-45c8-9d0c-185714e546d0\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dnxrh" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.865210 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.889987 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.905810 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.913108 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/40569d9f-89af-4f68-855d-88d881e71de5-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-9cr2f\" (UID: \"40569d9f-89af-4f68-855d-88d881e71de5\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-9cr2f" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.925276 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.927141 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40569d9f-89af-4f68-855d-88d881e71de5-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-9cr2f\" (UID: \"40569d9f-89af-4f68-855d-88d881e71de5\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-9cr2f" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.945597 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.965473 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.975498 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/523f9b10-74b8-45bb-b400-054b069286dc-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-t5878\" (UID: \"523f9b10-74b8-45bb-b400-054b069286dc\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-t5878" Feb 18 00:36:48 crc kubenswrapper[4791]: I0218 00:36:48.985943 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.024979 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.044784 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Feb 18 00:36:49 crc 
kubenswrapper[4791]: I0218 00:36:49.064984 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.086353 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.096635 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/86198f6e-9eb6-4058-bf71-543f8283d54c-metrics-tls\") pod \"ingress-operator-5b745b69d9-xlh9r\" (UID: \"86198f6e-9eb6-4058-bf71-543f8283d54c\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xlh9r" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.105663 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.115974 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6edc9cce-f90d-4bff-bc33-43ff4051944f-serving-cert\") pod \"service-ca-operator-777779d784-jhdhd\" (UID: \"6edc9cce-f90d-4bff-bc33-43ff4051944f\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jhdhd" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.125868 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.154673 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.157993 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/86198f6e-9eb6-4058-bf71-543f8283d54c-trusted-ca\") pod \"ingress-operator-5b745b69d9-xlh9r\" (UID: \"86198f6e-9eb6-4058-bf71-543f8283d54c\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xlh9r" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.166059 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.170323 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6edc9cce-f90d-4bff-bc33-43ff4051944f-config\") pod \"service-ca-operator-777779d784-jhdhd\" (UID: \"6edc9cce-f90d-4bff-bc33-43ff4051944f\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jhdhd" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.187122 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.205709 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.225832 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.236273 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/73644233-2e9a-458d-8513-f56dddce1c55-proxy-tls\") pod \"machine-config-controller-84d6567774-fpmgd\" (UID: 
\"73644233-2e9a-458d-8513-f56dddce1c55\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fpmgd" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.245614 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.265911 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.287070 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.306129 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.327149 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.346893 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.362452 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/6f0f9444-6d63-4e2b-b63e-eb14589c5fdb-signing-key\") pod \"service-ca-9c57cc56f-pxzzf\" (UID: \"6f0f9444-6d63-4e2b-b63e-eb14589c5fdb\") " pod="openshift-service-ca/service-ca-9c57cc56f-pxzzf" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.366366 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.367713 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/6f0f9444-6d63-4e2b-b63e-eb14589c5fdb-signing-cabundle\") pod \"service-ca-9c57cc56f-pxzzf\" (UID: \"6f0f9444-6d63-4e2b-b63e-eb14589c5fdb\") " pod="openshift-service-ca/service-ca-9c57cc56f-pxzzf" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.386341 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.407416 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.426092 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.446118 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.450859 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5ec919db-352a-41eb-9564-dfc8add29680-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-9n2vw\" (UID: \"5ec919db-352a-41eb-9564-dfc8add29680\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9n2vw" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.466628 4791 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.478120 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ec919db-352a-41eb-9564-dfc8add29680-config\") pod \"kube-controller-manager-operator-78b949d7b-9n2vw\" (UID: \"5ec919db-352a-41eb-9564-dfc8add29680\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9n2vw" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.486640 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.505353 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.525579 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.530941 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/302ddff9-9052-4d4f-9b4b-b0c0b1b3a638-serving-cert\") pod \"etcd-operator-b45778765-2ph78\" (UID: \"302ddff9-9052-4d4f-9b4b-b0c0b1b3a638\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2ph78" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.555879 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.560258 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d8671eab-3f50-466b-b6aa-f20c971e1b7e-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-7swxh\" (UID: \"d8671eab-3f50-466b-b6aa-f20c971e1b7e\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7swxh" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.566309 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.571867 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/302ddff9-9052-4d4f-9b4b-b0c0b1b3a638-etcd-client\") pod \"etcd-operator-b45778765-2ph78\" (UID: \"302ddff9-9052-4d4f-9b4b-b0c0b1b3a638\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2ph78" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.584343 4791 request.go:700] Waited for 1.003466049s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-etcd-operator/configmaps?fieldSelector=metadata.name%3Detcd-operator-config&limit=500&resourceVersion=0 Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.586331 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.589144 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/302ddff9-9052-4d4f-9b4b-b0c0b1b3a638-config\") pod \"etcd-operator-b45778765-2ph78\" (UID: \"302ddff9-9052-4d4f-9b4b-b0c0b1b3a638\") " 
pod="openshift-etcd-operator/etcd-operator-b45778765-2ph78" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.607139 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.627075 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.639550 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/302ddff9-9052-4d4f-9b4b-b0c0b1b3a638-etcd-ca\") pod \"etcd-operator-b45778765-2ph78\" (UID: \"302ddff9-9052-4d4f-9b4b-b0c0b1b3a638\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2ph78" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.646083 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.647957 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/302ddff9-9052-4d4f-9b4b-b0c0b1b3a638-etcd-service-ca\") pod \"etcd-operator-b45778765-2ph78\" (UID: \"302ddff9-9052-4d4f-9b4b-b0c0b1b3a638\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2ph78" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.666964 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.685798 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.687880 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/83ea23eb-c756-4262-a2d7-218bc3fb25ac-config-volume\") pod \"dns-default-vlrzr\" (UID: \"83ea23eb-c756-4262-a2d7-218bc3fb25ac\") " pod="openshift-dns/dns-default-vlrzr" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.717840 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.725353 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.729278 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d06d8fca-e20d-4b6b-9466-8c612ec8cc5f-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-plj59\" (UID: \"d06d8fca-e20d-4b6b-9466-8c612ec8cc5f\") " pod="openshift-marketplace/marketplace-operator-79b997595-plj59" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.746145 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.766078 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.771347 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/83ea23eb-c756-4262-a2d7-218bc3fb25ac-metrics-tls\") pod \"dns-default-vlrzr\" (UID: 
\"83ea23eb-c756-4262-a2d7-218bc3fb25ac\") " pod="openshift-dns/dns-default-vlrzr" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.787957 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.801068 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/d06d8fca-e20d-4b6b-9466-8c612ec8cc5f-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-plj59\" (UID: \"d06d8fca-e20d-4b6b-9466-8c612ec8cc5f\") " pod="openshift-marketplace/marketplace-operator-79b997595-plj59" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.805765 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.827804 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Feb 18 00:36:49 crc kubenswrapper[4791]: E0218 00:36:49.836969 4791 secret.go:188] Couldn't get secret openshift-image-registry/image-registry-operator-tls: failed to sync secret cache: timed out waiting for the condition Feb 18 00:36:49 crc kubenswrapper[4791]: E0218 00:36:49.837005 4791 configmap.go:193] Couldn't get configMap openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-config: failed to sync configmap cache: timed out waiting for the condition Feb 18 00:36:49 crc kubenswrapper[4791]: E0218 00:36:49.837079 4791 secret.go:188] Couldn't get secret openshift-ingress/router-certs-default: failed to sync secret cache: timed out waiting for the condition Feb 18 00:36:49 crc kubenswrapper[4791]: E0218 00:36:49.837097 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d8671eab-3f50-466b-b6aa-f20c971e1b7e-image-registry-operator-tls podName:d8671eab-3f50-466b-b6aa-f20c971e1b7e nodeName:}" failed. No retries permitted until 2026-02-18 00:36:50.337058032 +0000 UTC m=+151.905071232 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "image-registry-operator-tls" (UniqueName: "kubernetes.io/secret/d8671eab-3f50-466b-b6aa-f20c971e1b7e-image-registry-operator-tls") pod "cluster-image-registry-operator-dc59b4c8b-7swxh" (UID: "d8671eab-3f50-466b-b6aa-f20c971e1b7e") : failed to sync secret cache: timed out waiting for the condition Feb 18 00:36:49 crc kubenswrapper[4791]: E0218 00:36:49.837099 4791 secret.go:188] Couldn't get secret openshift-kube-scheduler-operator/kube-scheduler-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition Feb 18 00:36:49 crc kubenswrapper[4791]: E0218 00:36:49.837113 4791 configmap.go:193] Couldn't get configMap openshift-ingress/service-ca-bundle: failed to sync configmap cache: timed out waiting for the condition Feb 18 00:36:49 crc kubenswrapper[4791]: E0218 00:36:49.837149 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/58de0ec6-b12b-4dcb-b0cb-95f2f786467b-config podName:58de0ec6-b12b-4dcb-b0cb-95f2f786467b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:50.337136235 +0000 UTC m=+151.905149445 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/58de0ec6-b12b-4dcb-b0cb-95f2f786467b-config") pod "openshift-kube-scheduler-operator-5fdd9b5758-tlnpm" (UID: "58de0ec6-b12b-4dcb-b0cb-95f2f786467b") : failed to sync configmap cache: timed out waiting for the condition Feb 18 00:36:49 crc kubenswrapper[4791]: E0218 00:36:49.837190 4791 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/catalog-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition Feb 18 00:36:49 crc kubenswrapper[4791]: E0218 00:36:49.837116 4791 secret.go:188] Couldn't get secret openshift-kube-apiserver-operator/kube-apiserver-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition Feb 18 00:36:49 crc kubenswrapper[4791]: E0218 00:36:49.837228 4791 configmap.go:193] Couldn't get configMap openshift-kube-apiserver-operator/kube-apiserver-operator-config: failed to sync configmap cache: timed out waiting for the condition Feb 18 00:36:49 crc kubenswrapper[4791]: E0218 00:36:49.837129 4791 secret.go:188] Couldn't get secret openshift-ingress/router-metrics-certs-default: failed to sync secret cache: timed out waiting for the condition Feb 18 00:36:49 crc kubenswrapper[4791]: E0218 00:36:49.837124 4791 secret.go:188] Couldn't get secret openshift-ingress/router-stats-default: failed to sync secret cache: timed out waiting for the condition Feb 18 00:36:49 crc kubenswrapper[4791]: E0218 00:36:49.837209 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5826b603-6616-49b5-a4d0-9dd63e715c9e-default-certificate podName:5826b603-6616-49b5-a4d0-9dd63e715c9e nodeName:}" failed. No retries permitted until 2026-02-18 00:36:50.337196847 +0000 UTC m=+151.905210057 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "default-certificate" (UniqueName: "kubernetes.io/secret/5826b603-6616-49b5-a4d0-9dd63e715c9e-default-certificate") pod "router-default-5444994796-tk98h" (UID: "5826b603-6616-49b5-a4d0-9dd63e715c9e") : failed to sync secret cache: timed out waiting for the condition Feb 18 00:36:49 crc kubenswrapper[4791]: E0218 00:36:49.837373 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/58de0ec6-b12b-4dcb-b0cb-95f2f786467b-serving-cert podName:58de0ec6-b12b-4dcb-b0cb-95f2f786467b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:50.337341412 +0000 UTC m=+151.905354612 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/58de0ec6-b12b-4dcb-b0cb-95f2f786467b-serving-cert") pod "openshift-kube-scheduler-operator-5fdd9b5758-tlnpm" (UID: "58de0ec6-b12b-4dcb-b0cb-95f2f786467b") : failed to sync secret cache: timed out waiting for the condition Feb 18 00:36:49 crc kubenswrapper[4791]: E0218 00:36:49.837389 4791 secret.go:188] Couldn't get secret openshift-ingress-canary/canary-serving-cert: failed to sync secret cache: timed out waiting for the condition Feb 18 00:36:49 crc kubenswrapper[4791]: E0218 00:36:49.837400 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8ef3cbf2-9429-44d5-a7bd-fa1ef31707cc-srv-cert podName:8ef3cbf2-9429-44d5-a7bd-fa1ef31707cc nodeName:}" failed. No retries permitted until 2026-02-18 00:36:50.337385783 +0000 UTC m=+151.905398993 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/8ef3cbf2-9429-44d5-a7bd-fa1ef31707cc-srv-cert") pod "catalog-operator-68c6474976-t5rkn" (UID: "8ef3cbf2-9429-44d5-a7bd-fa1ef31707cc") : failed to sync secret cache: timed out waiting for the condition Feb 18 00:36:49 crc kubenswrapper[4791]: E0218 00:36:49.837422 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5826b603-6616-49b5-a4d0-9dd63e715c9e-service-ca-bundle podName:5826b603-6616-49b5-a4d0-9dd63e715c9e nodeName:}" failed. No retries permitted until 2026-02-18 00:36:50.337412054 +0000 UTC m=+151.905425254 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "service-ca-bundle" (UniqueName: "kubernetes.io/configmap/5826b603-6616-49b5-a4d0-9dd63e715c9e-service-ca-bundle") pod "router-default-5444994796-tk98h" (UID: "5826b603-6616-49b5-a4d0-9dd63e715c9e") : failed to sync configmap cache: timed out waiting for the condition Feb 18 00:36:49 crc kubenswrapper[4791]: E0218 00:36:49.837445 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/85eab95a-ee70-497d-a22d-1cf047cb906a-config podName:85eab95a-ee70-497d-a22d-1cf047cb906a nodeName:}" failed. No retries permitted until 2026-02-18 00:36:50.337434445 +0000 UTC m=+151.905447645 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/85eab95a-ee70-497d-a22d-1cf047cb906a-config") pod "kube-apiserver-operator-766d6c64bb-hb7bm" (UID: "85eab95a-ee70-497d-a22d-1cf047cb906a") : failed to sync configmap cache: timed out waiting for the condition Feb 18 00:36:49 crc kubenswrapper[4791]: E0218 00:36:49.837511 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5826b603-6616-49b5-a4d0-9dd63e715c9e-metrics-certs podName:5826b603-6616-49b5-a4d0-9dd63e715c9e nodeName:}" failed. No retries permitted until 2026-02-18 00:36:50.337497477 +0000 UTC m=+151.905510687 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/5826b603-6616-49b5-a4d0-9dd63e715c9e-metrics-certs") pod "router-default-5444994796-tk98h" (UID: "5826b603-6616-49b5-a4d0-9dd63e715c9e") : failed to sync secret cache: timed out waiting for the condition Feb 18 00:36:49 crc kubenswrapper[4791]: E0218 00:36:49.838349 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5826b603-6616-49b5-a4d0-9dd63e715c9e-stats-auth podName:5826b603-6616-49b5-a4d0-9dd63e715c9e nodeName:}" failed. No retries permitted until 2026-02-18 00:36:50.338296713 +0000 UTC m=+151.906309893 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "stats-auth" (UniqueName: "kubernetes.io/secret/5826b603-6616-49b5-a4d0-9dd63e715c9e-stats-auth") pod "router-default-5444994796-tk98h" (UID: "5826b603-6616-49b5-a4d0-9dd63e715c9e") : failed to sync secret cache: timed out waiting for the condition Feb 18 00:36:49 crc kubenswrapper[4791]: E0218 00:36:49.838378 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c6450a7c-d805-4e80-aee0-ae49750d8dc7-cert podName:c6450a7c-d805-4e80-aee0-ae49750d8dc7 nodeName:}" failed. No retries permitted until 2026-02-18 00:36:50.338368905 +0000 UTC m=+151.906382085 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/c6450a7c-d805-4e80-aee0-ae49750d8dc7-cert") pod "ingress-canary-fsfrp" (UID: "c6450a7c-d805-4e80-aee0-ae49750d8dc7") : failed to sync secret cache: timed out waiting for the condition Feb 18 00:36:49 crc kubenswrapper[4791]: E0218 00:36:49.838419 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/85eab95a-ee70-497d-a22d-1cf047cb906a-serving-cert podName:85eab95a-ee70-497d-a22d-1cf047cb906a nodeName:}" failed. No retries permitted until 2026-02-18 00:36:50.338409466 +0000 UTC m=+151.906422646 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/85eab95a-ee70-497d-a22d-1cf047cb906a-serving-cert") pod "kube-apiserver-operator-766d6c64bb-hb7bm" (UID: "85eab95a-ee70-497d-a22d-1cf047cb906a") : failed to sync secret cache: timed out waiting for the condition Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.846045 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.866103 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.886225 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.906004 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.925283 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.945352 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.965838 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Feb 18 00:36:49 crc kubenswrapper[4791]: I0218 00:36:49.986712 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.005854 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.025738 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.046895 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.065896 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.086558 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.106472 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" 
Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.126892 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.146211 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.166219 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.185068 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.205667 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.225898 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.245556 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.266122 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.286513 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.306184 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.340632 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9xcp9\" (UniqueName: \"kubernetes.io/projected/c2fe40bf-a690-426b-af71-d9e8af02202c-kube-api-access-9xcp9\") pod \"authentication-operator-69f744f599-v7s8c\" (UID: \"c2fe40bf-a690-426b-af71-d9e8af02202c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-v7s8c" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.360542 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dwtnb\" (UniqueName: \"kubernetes.io/projected/45cdb2a5-abaf-4f48-a61f-5ede25bd7bde-kube-api-access-dwtnb\") pod \"apiserver-7bbb656c7d-px7q8\" (UID: \"45cdb2a5-abaf-4f48-a61f-5ede25bd7bde\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-px7q8" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.362797 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8ef3cbf2-9429-44d5-a7bd-fa1ef31707cc-srv-cert\") pod \"catalog-operator-68c6474976-t5rkn\" (UID: \"8ef3cbf2-9429-44d5-a7bd-fa1ef31707cc\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-t5rkn" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.362914 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/5826b603-6616-49b5-a4d0-9dd63e715c9e-stats-auth\") pod \"router-default-5444994796-tk98h\" (UID: \"5826b603-6616-49b5-a4d0-9dd63e715c9e\") " pod="openshift-ingress/router-default-5444994796-tk98h" 
Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.362949 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/58de0ec6-b12b-4dcb-b0cb-95f2f786467b-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-tlnpm\" (UID: \"58de0ec6-b12b-4dcb-b0cb-95f2f786467b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-tlnpm" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.362989 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58de0ec6-b12b-4dcb-b0cb-95f2f786467b-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-tlnpm\" (UID: \"58de0ec6-b12b-4dcb-b0cb-95f2f786467b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-tlnpm" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.363020 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/5826b603-6616-49b5-a4d0-9dd63e715c9e-default-certificate\") pod \"router-default-5444994796-tk98h\" (UID: \"5826b603-6616-49b5-a4d0-9dd63e715c9e\") " pod="openshift-ingress/router-default-5444994796-tk98h" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.363044 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/85eab95a-ee70-497d-a22d-1cf047cb906a-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-hb7bm\" (UID: \"85eab95a-ee70-497d-a22d-1cf047cb906a\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hb7bm" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.363083 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5826b603-6616-49b5-a4d0-9dd63e715c9e-service-ca-bundle\") pod \"router-default-5444994796-tk98h\" (UID: \"5826b603-6616-49b5-a4d0-9dd63e715c9e\") " pod="openshift-ingress/router-default-5444994796-tk98h" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.363116 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/d8671eab-3f50-466b-b6aa-f20c971e1b7e-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-7swxh\" (UID: \"d8671eab-3f50-466b-b6aa-f20c971e1b7e\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7swxh" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.363203 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5826b603-6616-49b5-a4d0-9dd63e715c9e-metrics-certs\") pod \"router-default-5444994796-tk98h\" (UID: \"5826b603-6616-49b5-a4d0-9dd63e715c9e\") " pod="openshift-ingress/router-default-5444994796-tk98h" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.363341 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c6450a7c-d805-4e80-aee0-ae49750d8dc7-cert\") pod \"ingress-canary-fsfrp\" (UID: \"c6450a7c-d805-4e80-aee0-ae49750d8dc7\") " pod="openshift-ingress-canary/ingress-canary-fsfrp" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.363378 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/85eab95a-ee70-497d-a22d-1cf047cb906a-config\") pod \"kube-apiserver-operator-766d6c64bb-hb7bm\" (UID: \"85eab95a-ee70-497d-a22d-1cf047cb906a\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hb7bm" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.365080 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/85eab95a-ee70-497d-a22d-1cf047cb906a-config\") pod \"kube-apiserver-operator-766d6c64bb-hb7bm\" (UID: \"85eab95a-ee70-497d-a22d-1cf047cb906a\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hb7bm" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.365217 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5826b603-6616-49b5-a4d0-9dd63e715c9e-service-ca-bundle\") pod \"router-default-5444994796-tk98h\" (UID: \"5826b603-6616-49b5-a4d0-9dd63e715c9e\") " pod="openshift-ingress/router-default-5444994796-tk98h" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.365916 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/5826b603-6616-49b5-a4d0-9dd63e715c9e-default-certificate\") pod \"router-default-5444994796-tk98h\" (UID: \"5826b603-6616-49b5-a4d0-9dd63e715c9e\") " pod="openshift-ingress/router-default-5444994796-tk98h" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.366422 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58de0ec6-b12b-4dcb-b0cb-95f2f786467b-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-tlnpm\" (UID: \"58de0ec6-b12b-4dcb-b0cb-95f2f786467b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-tlnpm" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.366845 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8ef3cbf2-9429-44d5-a7bd-fa1ef31707cc-srv-cert\") pod \"catalog-operator-68c6474976-t5rkn\" (UID: \"8ef3cbf2-9429-44d5-a7bd-fa1ef31707cc\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-t5rkn" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.367905 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5826b603-6616-49b5-a4d0-9dd63e715c9e-metrics-certs\") pod \"router-default-5444994796-tk98h\" (UID: \"5826b603-6616-49b5-a4d0-9dd63e715c9e\") " pod="openshift-ingress/router-default-5444994796-tk98h" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.368577 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/5826b603-6616-49b5-a4d0-9dd63e715c9e-stats-auth\") pod \"router-default-5444994796-tk98h\" (UID: \"5826b603-6616-49b5-a4d0-9dd63e715c9e\") " pod="openshift-ingress/router-default-5444994796-tk98h" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.369606 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/85eab95a-ee70-497d-a22d-1cf047cb906a-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-hb7bm\" (UID: \"85eab95a-ee70-497d-a22d-1cf047cb906a\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hb7bm" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 
00:36:50.371524 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/d8671eab-3f50-466b-b6aa-f20c971e1b7e-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-7swxh\" (UID: \"d8671eab-3f50-466b-b6aa-f20c971e1b7e\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7swxh" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.372334 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/58de0ec6-b12b-4dcb-b0cb-95f2f786467b-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-tlnpm\" (UID: \"58de0ec6-b12b-4dcb-b0cb-95f2f786467b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-tlnpm" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.380276 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kl5sf\" (UniqueName: \"kubernetes.io/projected/da348cdf-f9c8-4e7c-b462-7d4979dada11-kube-api-access-kl5sf\") pod \"controller-manager-879f6c89f-w8nfn\" (UID: \"da348cdf-f9c8-4e7c-b462-7d4979dada11\") " pod="openshift-controller-manager/controller-manager-879f6c89f-w8nfn" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.399304 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-26hsc\" (UniqueName: \"kubernetes.io/projected/da9a0a6f-10c0-4fa0-8417-642ff4194832-kube-api-access-26hsc\") pod \"image-pruner-29522880-928kh\" (UID: \"da9a0a6f-10c0-4fa0-8417-642ff4194832\") " pod="openshift-image-registry/image-pruner-29522880-928kh" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.404363 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-pruner-29522880-928kh" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.413927 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-v7s8c" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.426994 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9hxx\" (UniqueName: \"kubernetes.io/projected/8eb994e6-6e8b-4d0f-84f2-92333f1571c1-kube-api-access-g9hxx\") pod \"apiserver-76f77b778f-xdf9g\" (UID: \"8eb994e6-6e8b-4d0f-84f2-92333f1571c1\") " pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.434636 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-px7q8" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.445982 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p28tm\" (UniqueName: \"kubernetes.io/projected/f5c71b3e-957e-466a-97c6-b114ee0eea13-kube-api-access-p28tm\") pod \"machine-api-operator-5694c8668f-rwmnp\" (UID: \"f5c71b3e-957e-466a-97c6-b114ee0eea13\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rwmnp" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.462914 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p7npf\" (UniqueName: \"kubernetes.io/projected/d465afa1-5937-4edd-a90f-27cec9705554-kube-api-access-p7npf\") pod \"console-operator-58897d9998-r4t2n\" (UID: \"d465afa1-5937-4edd-a90f-27cec9705554\") " pod="openshift-console-operator/console-operator-58897d9998-r4t2n" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.466276 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.486800 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.506181 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.521724 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c6450a7c-d805-4e80-aee0-ae49750d8dc7-cert\") pod \"ingress-canary-fsfrp\" (UID: \"c6450a7c-d805-4e80-aee0-ae49750d8dc7\") " pod="openshift-ingress-canary/ingress-canary-fsfrp" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.526224 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.566249 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.584543 4791 request.go:700] Waited for 1.894067978s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/secrets?fieldSelector=metadata.name%3Dmachine-config-server-dockercfg-qx5rd&limit=500&resourceVersion=0 Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.586394 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.597093 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.605905 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.626537 4791 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.640645 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-rwmnp" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.646508 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.666046 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.671096 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-w8nfn" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.689524 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-r4t2n" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.699613 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7kgdr\" (UniqueName: \"kubernetes.io/projected/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-kube-api-access-7kgdr\") pod \"oauth-openshift-558db77b4-gxtkn\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.723400 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qmx59\" (UniqueName: \"kubernetes.io/projected/929ec2d3-2a0e-4031-9a2a-08b9744e2b40-kube-api-access-qmx59\") pod \"openshift-config-operator-7777fb866f-dvs8h\" (UID: \"929ec2d3-2a0e-4031-9a2a-08b9744e2b40\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-dvs8h" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.748877 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xq7q4\" (UniqueName: \"kubernetes.io/projected/cf6cb538-0c36-4140-a49f-2fcf19d46169-kube-api-access-xq7q4\") pod \"downloads-7954f5f757-qj494\" (UID: \"cf6cb538-0c36-4140-a49f-2fcf19d46169\") " pod="openshift-console/downloads-7954f5f757-qj494" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.764365 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dhj2k\" (UniqueName: \"kubernetes.io/projected/a4f1858f-9974-4098-a264-b981c587623b-kube-api-access-dhj2k\") pod \"control-plane-machine-set-operator-78cbb6b69f-82c2d\" (UID: \"a4f1858f-9974-4098-a264-b981c587623b\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-82c2d" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.764670 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-xdf9g"] Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.784076 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.787458 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ksj7w\" (UniqueName: \"kubernetes.io/projected/e1e9abd7-81f2-423d-8a79-c4102461680d-kube-api-access-ksj7w\") pod \"console-f9d7485db-rxhdk\" (UID: \"e1e9abd7-81f2-423d-8a79-c4102461680d\") " pod="openshift-console/console-f9d7485db-rxhdk" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.793301 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-pruner-29522880-928kh"] Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.801483 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-qj494" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.802503 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tcjwn\" (UniqueName: \"kubernetes.io/projected/e0149f47-a655-434f-982e-b8872ea180c4-kube-api-access-tcjwn\") pod \"migrator-59844c95c7-rfkpk\" (UID: \"e0149f47-a655-434f-982e-b8872ea180c4\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-rfkpk" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.835801 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-v7s8c"] Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.836834 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-rxhdk" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.838859 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jm846\" (UniqueName: \"kubernetes.io/projected/523f9b10-74b8-45bb-b400-054b069286dc-kube-api-access-jm846\") pod \"multus-admission-controller-857f4d67dd-t5878\" (UID: \"523f9b10-74b8-45bb-b400-054b069286dc\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-t5878" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.840017 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-rwmnp"] Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.842874 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p42rv\" (UniqueName: \"kubernetes.io/projected/40569d9f-89af-4f68-855d-88d881e71de5-kube-api-access-p42rv\") pod \"kube-storage-version-migrator-operator-b67b599dd-9cr2f\" (UID: \"40569d9f-89af-4f68-855d-88d881e71de5\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-9cr2f" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.844117 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-px7q8"] Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.845274 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-dvs8h" Feb 18 00:36:50 crc kubenswrapper[4791]: W0218 00:36:50.846767 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc2fe40bf_a690_426b_af71_d9e8af02202c.slice/crio-3d886834a7494dd973806fbcf89a18bc8a8eb4acc0a9ea91de98680ffe3ba788 WatchSource:0}: Error finding container 3d886834a7494dd973806fbcf89a18bc8a8eb4acc0a9ea91de98680ffe3ba788: Status 404 returned error can't find the container with id 3d886834a7494dd973806fbcf89a18bc8a8eb4acc0a9ea91de98680ffe3ba788 Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.859596 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-rfkpk" Feb 18 00:36:50 crc kubenswrapper[4791]: W0218 00:36:50.859631 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf5c71b3e_957e_466a_97c6_b114ee0eea13.slice/crio-56721d4c92e25843bfe7890e63604eadfda9f3c0c5e87d2978f5acf1f9318371 WatchSource:0}: Error finding container 56721d4c92e25843bfe7890e63604eadfda9f3c0c5e87d2978f5acf1f9318371: Status 404 returned error can't find the container with id 56721d4c92e25843bfe7890e63604eadfda9f3c0c5e87d2978f5acf1f9318371 Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.860035 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gq7cw\" (UniqueName: \"kubernetes.io/projected/e7603b81-f0fd-4f84-980d-672efe28d72f-kube-api-access-gq7cw\") pod \"package-server-manager-789f6589d5-k8nrp\" (UID: \"e7603b81-f0fd-4f84-980d-672efe28d72f\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-k8nrp" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.883299 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-k8nrp" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.890673 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-82c2d" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.891112 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-r4t2n"] Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.897669 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x7qb5\" (UniqueName: \"kubernetes.io/projected/12d0d3a0-841c-4a06-aad2-c52c33800392-kube-api-access-x7qb5\") pod \"machine-approver-56656f9798-8tsx6\" (UID: \"12d0d3a0-841c-4a06-aad2-c52c33800392\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-8tsx6" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.904825 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-9cr2f" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.905273 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m6wqn\" (UniqueName: \"kubernetes.io/projected/0c4d0046-4e3a-45c8-9d0c-185714e546d0-kube-api-access-m6wqn\") pod \"olm-operator-6b444d44fb-dnxrh\" (UID: \"0c4d0046-4e3a-45c8-9d0c-185714e546d0\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dnxrh" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.912146 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-t5878" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.922841 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-w8nfn"] Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.925612 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/86198f6e-9eb6-4058-bf71-543f8283d54c-bound-sa-token\") pod \"ingress-operator-5b745b69d9-xlh9r\" (UID: \"86198f6e-9eb6-4058-bf71-543f8283d54c\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xlh9r" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.948563 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5rcr2\" (UniqueName: \"kubernetes.io/projected/a7563a68-6d05-4beb-8d19-ab941578e67e-kube-api-access-5rcr2\") pod \"machine-config-operator-74547568cd-2tcll\" (UID: \"a7563a68-6d05-4beb-8d19-ab941578e67e\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2tcll" Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.961498 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pxxj8\" (UniqueName: \"kubernetes.io/projected/668a019f-7e1c-4aca-a07c-3f22308a66c2-kube-api-access-pxxj8\") pod \"openshift-apiserver-operator-796bbdcf4f-4zz5m\" (UID: \"668a019f-7e1c-4aca-a07c-3f22308a66c2\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4zz5m" Feb 18 00:36:50 crc kubenswrapper[4791]: W0218 00:36:50.967489 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podda348cdf_f9c8_4e7c_b462_7d4979dada11.slice/crio-f3da2a06175ac96686413ab0f9eee802183401fd3a1355a069b06aa464d2cc81 WatchSource:0}: Error finding container f3da2a06175ac96686413ab0f9eee802183401fd3a1355a069b06aa464d2cc81: Status 404 returned error can't find the container with id f3da2a06175ac96686413ab0f9eee802183401fd3a1355a069b06aa464d2cc81 Feb 18 00:36:50 crc kubenswrapper[4791]: I0218 00:36:50.987824 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gw4zr\" (UniqueName: \"kubernetes.io/projected/a0084382-ff2c-4c0d-8460-b0694790a78b-kube-api-access-gw4zr\") pod \"cluster-samples-operator-665b6dd947-7pmtt\" (UID: \"a0084382-ff2c-4c0d-8460-b0694790a78b\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7pmtt" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.008449 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8tqvq\" (UniqueName: \"kubernetes.io/projected/c1261ad9-b566-419e-ad9a-e7b361edd24a-kube-api-access-8tqvq\") pod \"route-controller-manager-6576b87f9c-2m72p\" 
(UID: \"c1261ad9-b566-419e-ad9a-e7b361edd24a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2m72p" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.023431 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9xm7\" (UniqueName: \"kubernetes.io/projected/b06fac8f-e399-462f-9665-35781318a69d-kube-api-access-g9xm7\") pod \"openshift-controller-manager-operator-756b6f6bc6-xhnwk\" (UID: \"b06fac8f-e399-462f-9665-35781318a69d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xhnwk" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.042451 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fnkl7\" (UniqueName: \"kubernetes.io/projected/6edc9cce-f90d-4bff-bc33-43ff4051944f-kube-api-access-fnkl7\") pod \"service-ca-operator-777779d784-jhdhd\" (UID: \"6edc9cce-f90d-4bff-bc33-43ff4051944f\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jhdhd" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.066684 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4b4w\" (UniqueName: \"kubernetes.io/projected/86198f6e-9eb6-4058-bf71-543f8283d54c-kube-api-access-q4b4w\") pod \"ingress-operator-5b745b69d9-xlh9r\" (UID: \"86198f6e-9eb6-4058-bf71-543f8283d54c\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xlh9r" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.086056 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bv26x\" (UniqueName: \"kubernetes.io/projected/73644233-2e9a-458d-8513-f56dddce1c55-kube-api-access-bv26x\") pod \"machine-config-controller-84d6567774-fpmgd\" (UID: \"73644233-2e9a-458d-8513-f56dddce1c55\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fpmgd" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.095449 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7pmtt" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.111487 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2m72p" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.122132 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b2dw4\" (UniqueName: \"kubernetes.io/projected/7fa9bca6-1980-4a13-9c30-5d693c51b72c-kube-api-access-b2dw4\") pod \"packageserver-d55dfcdfc-82mpq\" (UID: \"7fa9bca6-1980-4a13-9c30-5d693c51b72c\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-82mpq" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.122356 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-8tsx6" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.125768 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2mpgl\" (UniqueName: \"kubernetes.io/projected/5826b603-6616-49b5-a4d0-9dd63e715c9e-kube-api-access-2mpgl\") pod \"router-default-5444994796-tk98h\" (UID: \"5826b603-6616-49b5-a4d0-9dd63e715c9e\") " pod="openshift-ingress/router-default-5444994796-tk98h" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.130000 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4zz5m" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.149786 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5xdfq\" (UniqueName: \"kubernetes.io/projected/d8671eab-3f50-466b-b6aa-f20c971e1b7e-kube-api-access-5xdfq\") pod \"cluster-image-registry-operator-dc59b4c8b-7swxh\" (UID: \"d8671eab-3f50-466b-b6aa-f20c971e1b7e\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7swxh" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.154069 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2tcll" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.167568 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xhnwk" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.173432 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-82mpq" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.181808 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-pruner-29522880-928kh" event={"ID":"da9a0a6f-10c0-4fa0-8417-642ff4194832","Type":"ContainerStarted","Data":"e4a7db5ed63ade2524a1403f103e2253affe7f9a323aa09490dba5056eaf5962"} Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.181847 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-pruner-29522880-928kh" event={"ID":"da9a0a6f-10c0-4fa0-8417-642ff4194832","Type":"ContainerStarted","Data":"8500f196330df72d58e1a8487943e12656f1493ca43dcf03fcb178b7061a18b4"} Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.190104 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-w8nfn" event={"ID":"da348cdf-f9c8-4e7c-b462-7d4979dada11","Type":"ContainerStarted","Data":"f3da2a06175ac96686413ab0f9eee802183401fd3a1355a069b06aa464d2cc81"} Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.192854 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-rwmnp" event={"ID":"f5c71b3e-957e-466a-97c6-b114ee0eea13","Type":"ContainerStarted","Data":"7def582fdc477859f85265c45f4b7a2c3e17d240119968d7c550f0c0d9fc795d"} Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.192903 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-rwmnp" event={"ID":"f5c71b3e-957e-466a-97c6-b114ee0eea13","Type":"ContainerStarted","Data":"56721d4c92e25843bfe7890e63604eadfda9f3c0c5e87d2978f5acf1f9318371"} Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.194247 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-px7q8" event={"ID":"45cdb2a5-abaf-4f48-a61f-5ede25bd7bde","Type":"ContainerStarted","Data":"4dc9ac64db1fcd2887b9382857c821c2f8b5baf02dcdd422a00b421e9ef26943"} Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.196818 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dnxrh" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.200303 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-v7s8c" event={"ID":"c2fe40bf-a690-426b-af71-d9e8af02202c","Type":"ContainerStarted","Data":"82b1f2fbd4a0bb249c4a4deb259edd76e378621468ddeb1f163db789aa489734"} Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.200351 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-v7s8c" event={"ID":"c2fe40bf-a690-426b-af71-d9e8af02202c","Type":"ContainerStarted","Data":"3d886834a7494dd973806fbcf89a18bc8a8eb4acc0a9ea91de98680ffe3ba788"} Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.201175 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/58de0ec6-b12b-4dcb-b0cb-95f2f786467b-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-tlnpm\" (UID: \"58de0ec6-b12b-4dcb-b0cb-95f2f786467b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-tlnpm" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.205433 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" event={"ID":"8eb994e6-6e8b-4d0f-84f2-92333f1571c1","Type":"ContainerStarted","Data":"80d4bb9e27e547975c3388ad54ebdf230107598ee3c496c4866f372e486dcb8b"} Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.208353 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-r4t2n" event={"ID":"d465afa1-5937-4edd-a90f-27cec9705554","Type":"ContainerStarted","Data":"533eae9ce89225fddf3ab72db93e4bd49d017c01b12ecb2c73ae660c979b7506"} Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.208629 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8pl5c\" (UniqueName: \"kubernetes.io/projected/302ddff9-9052-4d4f-9b4b-b0c0b1b3a638-kube-api-access-8pl5c\") pod \"etcd-operator-b45778765-2ph78\" (UID: \"302ddff9-9052-4d4f-9b4b-b0c0b1b3a638\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2ph78" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.209461 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5ec919db-352a-41eb-9564-dfc8add29680-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-9n2vw\" (UID: \"5ec919db-352a-41eb-9564-dfc8add29680\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9n2vw" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.219817 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xlh9r" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.220459 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k7ttg\" (UniqueName: \"kubernetes.io/projected/c6450a7c-d805-4e80-aee0-ae49750d8dc7-kube-api-access-k7ttg\") pod \"ingress-canary-fsfrp\" (UID: \"c6450a7c-d805-4e80-aee0-ae49750d8dc7\") " pod="openshift-ingress-canary/ingress-canary-fsfrp" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.228842 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-jhdhd" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.240410 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cxk9n\" (UniqueName: \"kubernetes.io/projected/d06d8fca-e20d-4b6b-9466-8c612ec8cc5f-kube-api-access-cxk9n\") pod \"marketplace-operator-79b997595-plj59\" (UID: \"d06d8fca-e20d-4b6b-9466-8c612ec8cc5f\") " pod="openshift-marketplace/marketplace-operator-79b997595-plj59" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.249492 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fpmgd" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.262801 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d8671eab-3f50-466b-b6aa-f20c971e1b7e-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-7swxh\" (UID: \"d8671eab-3f50-466b-b6aa-f20c971e1b7e\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7swxh" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.269205 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9n2vw" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.277981 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-2ph78" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.280344 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nkzjh\" (UniqueName: \"kubernetes.io/projected/83ea23eb-c756-4262-a2d7-218bc3fb25ac-kube-api-access-nkzjh\") pod \"dns-default-vlrzr\" (UID: \"83ea23eb-c756-4262-a2d7-218bc3fb25ac\") " pod="openshift-dns/dns-default-vlrzr" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.280368 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-rxhdk"] Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.289524 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7swxh" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.298408 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-vlrzr" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.306841 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-plj59" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.307288 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sl58g\" (UniqueName: \"kubernetes.io/projected/6f0f9444-6d63-4e2b-b63e-eb14589c5fdb-kube-api-access-sl58g\") pod \"service-ca-9c57cc56f-pxzzf\" (UID: \"6f0f9444-6d63-4e2b-b63e-eb14589c5fdb\") " pod="openshift-service-ca/service-ca-9c57cc56f-pxzzf" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.312084 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-tk98h" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.334343 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/85eab95a-ee70-497d-a22d-1cf047cb906a-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-hb7bm\" (UID: \"85eab95a-ee70-497d-a22d-1cf047cb906a\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hb7bm" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.344746 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-tlnpm" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.353130 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hcb9j\" (UniqueName: \"kubernetes.io/projected/8ef3cbf2-9429-44d5-a7bd-fa1ef31707cc-kube-api-access-hcb9j\") pod \"catalog-operator-68c6474976-t5rkn\" (UID: \"8ef3cbf2-9429-44d5-a7bd-fa1ef31707cc\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-t5rkn" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.356857 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-gxtkn"] Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.364007 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-fsfrp" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.369928 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-82c2d"] Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.373703 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-k8nrp"] Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.375762 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-qj494"] Feb 18 00:36:51 crc kubenswrapper[4791]: W0218 00:36:51.399795 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddc760ce4_d81e_4d1e_9341_1d8b40596f4c.slice/crio-c52bf3b8041eb179d87b05e9a2a9f7ee6f9dc8f453e6c7095c46e8d3a1d4ffd8 WatchSource:0}: Error finding container c52bf3b8041eb179d87b05e9a2a9f7ee6f9dc8f453e6c7095c46e8d3a1d4ffd8: Status 404 returned error can't find the container with id c52bf3b8041eb179d87b05e9a2a9f7ee6f9dc8f453e6c7095c46e8d3a1d4ffd8 Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.402793 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/3183b0f5-bf03-4265-a2e6-b05f05841663-metrics-tls\") pod \"dns-operator-744455d44c-2mrz5\" (UID: \"3183b0f5-bf03-4265-a2e6-b05f05841663\") " pod="openshift-dns-operator/dns-operator-744455d44c-2mrz5" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.402845 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-ca-trust-extracted\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 
00:36:51.403101 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kn4jn\" (UniqueName: \"kubernetes.io/projected/3183b0f5-bf03-4265-a2e6-b05f05841663-kube-api-access-kn4jn\") pod \"dns-operator-744455d44c-2mrz5\" (UID: \"3183b0f5-bf03-4265-a2e6-b05f05841663\") " pod="openshift-dns-operator/dns-operator-744455d44c-2mrz5" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.403190 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-djz5p\" (UniqueName: \"kubernetes.io/projected/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-kube-api-access-djz5p\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.403213 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02-config-volume\") pod \"collect-profiles-29522910-zlz2k\" (UID: \"eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522910-zlz2k" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.403272 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-bound-sa-token\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.403288 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-registry-tls\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.403301 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-installation-pull-secrets\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.403347 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lcclb\" (UniqueName: \"kubernetes.io/projected/eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02-kube-api-access-lcclb\") pod \"collect-profiles-29522910-zlz2k\" (UID: \"eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522910-zlz2k" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.403365 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-registry-certificates\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.403379 4791 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02-secret-volume\") pod \"collect-profiles-29522910-zlz2k\" (UID: \"eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522910-zlz2k" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.404277 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-trusted-ca\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.404373 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:51 crc kubenswrapper[4791]: E0218 00:36:51.405042 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:51.905024808 +0000 UTC m=+153.473038028 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.452349 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-rfkpk"] Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.505035 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:51 crc kubenswrapper[4791]: E0218 00:36:51.505229 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:52.005202656 +0000 UTC m=+153.573215826 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.505438 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-trusted-ca\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.505475 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:51 crc kubenswrapper[4791]: W0218 00:36:51.506407 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcf6cb538_0c36_4140_a49f_2fcf19d46169.slice/crio-7f4b0b1363a286a0f4ab23ebb853673e3b266c8cb8adc632b318b878580e60f3 WatchSource:0}: Error finding container 7f4b0b1363a286a0f4ab23ebb853673e3b266c8cb8adc632b318b878580e60f3: Status 404 returned error can't find the container with id 7f4b0b1363a286a0f4ab23ebb853673e3b266c8cb8adc632b318b878580e60f3 Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.506690 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/ceddb914-6af3-4417-861b-20ac241c337d-registration-dir\") pod \"csi-hostpathplugin-l4ktw\" (UID: \"ceddb914-6af3-4417-861b-20ac241c337d\") " pod="hostpath-provisioner/csi-hostpathplugin-l4ktw" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.506727 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/ca687e44-9ae4-4469-9ccf-6ed82a75c235-certs\") pod \"machine-config-server-6xhp5\" (UID: \"ca687e44-9ae4-4469-9ccf-6ed82a75c235\") " pod="openshift-machine-config-operator/machine-config-server-6xhp5" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.506800 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c2cks\" (UniqueName: \"kubernetes.io/projected/ceddb914-6af3-4417-861b-20ac241c337d-kube-api-access-c2cks\") pod \"csi-hostpathplugin-l4ktw\" (UID: \"ceddb914-6af3-4417-861b-20ac241c337d\") " pod="hostpath-provisioner/csi-hostpathplugin-l4ktw" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.506836 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/3183b0f5-bf03-4265-a2e6-b05f05841663-metrics-tls\") pod \"dns-operator-744455d44c-2mrz5\" (UID: \"3183b0f5-bf03-4265-a2e6-b05f05841663\") " pod="openshift-dns-operator/dns-operator-744455d44c-2mrz5" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.506873 4791 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/ceddb914-6af3-4417-861b-20ac241c337d-plugins-dir\") pod \"csi-hostpathplugin-l4ktw\" (UID: \"ceddb914-6af3-4417-861b-20ac241c337d\") " pod="hostpath-provisioner/csi-hostpathplugin-l4ktw" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.506903 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/ca687e44-9ae4-4469-9ccf-6ed82a75c235-node-bootstrap-token\") pod \"machine-config-server-6xhp5\" (UID: \"ca687e44-9ae4-4469-9ccf-6ed82a75c235\") " pod="openshift-machine-config-operator/machine-config-server-6xhp5" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.506948 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-ca-trust-extracted\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.507055 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kn4jn\" (UniqueName: \"kubernetes.io/projected/3183b0f5-bf03-4265-a2e6-b05f05841663-kube-api-access-kn4jn\") pod \"dns-operator-744455d44c-2mrz5\" (UID: \"3183b0f5-bf03-4265-a2e6-b05f05841663\") " pod="openshift-dns-operator/dns-operator-744455d44c-2mrz5" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.507125 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-djz5p\" (UniqueName: \"kubernetes.io/projected/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-kube-api-access-djz5p\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.507142 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/ceddb914-6af3-4417-861b-20ac241c337d-csi-data-dir\") pod \"csi-hostpathplugin-l4ktw\" (UID: \"ceddb914-6af3-4417-861b-20ac241c337d\") " pod="hostpath-provisioner/csi-hostpathplugin-l4ktw" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.507209 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02-config-volume\") pod \"collect-profiles-29522910-zlz2k\" (UID: \"eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522910-zlz2k" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.507235 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-bound-sa-token\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.507250 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dcwvq\" (UniqueName: 
\"kubernetes.io/projected/ca687e44-9ae4-4469-9ccf-6ed82a75c235-kube-api-access-dcwvq\") pod \"machine-config-server-6xhp5\" (UID: \"ca687e44-9ae4-4469-9ccf-6ed82a75c235\") " pod="openshift-machine-config-operator/machine-config-server-6xhp5" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.507288 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-registry-tls\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.507305 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-installation-pull-secrets\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.507359 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/ceddb914-6af3-4417-861b-20ac241c337d-mountpoint-dir\") pod \"csi-hostpathplugin-l4ktw\" (UID: \"ceddb914-6af3-4417-861b-20ac241c337d\") " pod="hostpath-provisioner/csi-hostpathplugin-l4ktw" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.507383 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/ceddb914-6af3-4417-861b-20ac241c337d-socket-dir\") pod \"csi-hostpathplugin-l4ktw\" (UID: \"ceddb914-6af3-4417-861b-20ac241c337d\") " pod="hostpath-provisioner/csi-hostpathplugin-l4ktw" Feb 18 00:36:51 crc kubenswrapper[4791]: E0218 00:36:51.507493 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:52.007484441 +0000 UTC m=+153.575497611 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.507547 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lcclb\" (UniqueName: \"kubernetes.io/projected/eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02-kube-api-access-lcclb\") pod \"collect-profiles-29522910-zlz2k\" (UID: \"eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522910-zlz2k" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.507607 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-registry-certificates\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.507624 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02-secret-volume\") pod \"collect-profiles-29522910-zlz2k\" (UID: \"eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522910-zlz2k" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.512285 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-ca-trust-extracted\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.523199 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-registry-certificates\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.525138 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-registry-tls\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.527780 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-trusted-ca\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.528717 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: 
\"kubernetes.io/secret/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-installation-pull-secrets\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.533279 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02-config-volume\") pod \"collect-profiles-29522910-zlz2k\" (UID: \"eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522910-zlz2k" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.547670 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-dvs8h"] Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.558296 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02-secret-volume\") pod \"collect-profiles-29522910-zlz2k\" (UID: \"eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522910-zlz2k" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.561257 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-pxzzf" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.564411 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-djz5p\" (UniqueName: \"kubernetes.io/projected/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-kube-api-access-djz5p\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.565763 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/3183b0f5-bf03-4265-a2e6-b05f05841663-metrics-tls\") pod \"dns-operator-744455d44c-2mrz5\" (UID: \"3183b0f5-bf03-4265-a2e6-b05f05841663\") " pod="openshift-dns-operator/dns-operator-744455d44c-2mrz5" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.584406 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-bound-sa-token\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.603877 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lcclb\" (UniqueName: \"kubernetes.io/projected/eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02-kube-api-access-lcclb\") pod \"collect-profiles-29522910-zlz2k\" (UID: \"eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522910-zlz2k" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.611513 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:51 crc kubenswrapper[4791]: E0218 00:36:51.611647 4791 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:52.111624341 +0000 UTC m=+153.679637511 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.611785 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dcwvq\" (UniqueName: \"kubernetes.io/projected/ca687e44-9ae4-4469-9ccf-6ed82a75c235-kube-api-access-dcwvq\") pod \"machine-config-server-6xhp5\" (UID: \"ca687e44-9ae4-4469-9ccf-6ed82a75c235\") " pod="openshift-machine-config-operator/machine-config-server-6xhp5" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.611849 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/ceddb914-6af3-4417-861b-20ac241c337d-mountpoint-dir\") pod \"csi-hostpathplugin-l4ktw\" (UID: \"ceddb914-6af3-4417-861b-20ac241c337d\") " pod="hostpath-provisioner/csi-hostpathplugin-l4ktw" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.611869 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/ceddb914-6af3-4417-861b-20ac241c337d-socket-dir\") pod \"csi-hostpathplugin-l4ktw\" (UID: \"ceddb914-6af3-4417-861b-20ac241c337d\") " pod="hostpath-provisioner/csi-hostpathplugin-l4ktw" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.612008 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/ceddb914-6af3-4417-861b-20ac241c337d-mountpoint-dir\") pod \"csi-hostpathplugin-l4ktw\" (UID: \"ceddb914-6af3-4417-861b-20ac241c337d\") " pod="hostpath-provisioner/csi-hostpathplugin-l4ktw" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.612018 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.612116 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/ceddb914-6af3-4417-861b-20ac241c337d-registration-dir\") pod \"csi-hostpathplugin-l4ktw\" (UID: \"ceddb914-6af3-4417-861b-20ac241c337d\") " pod="hostpath-provisioner/csi-hostpathplugin-l4ktw" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.612142 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/ca687e44-9ae4-4469-9ccf-6ed82a75c235-certs\") pod \"machine-config-server-6xhp5\" (UID: \"ca687e44-9ae4-4469-9ccf-6ed82a75c235\") " 
pod="openshift-machine-config-operator/machine-config-server-6xhp5" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.612195 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c2cks\" (UniqueName: \"kubernetes.io/projected/ceddb914-6af3-4417-861b-20ac241c337d-kube-api-access-c2cks\") pod \"csi-hostpathplugin-l4ktw\" (UID: \"ceddb914-6af3-4417-861b-20ac241c337d\") " pod="hostpath-provisioner/csi-hostpathplugin-l4ktw" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.612211 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/ceddb914-6af3-4417-861b-20ac241c337d-plugins-dir\") pod \"csi-hostpathplugin-l4ktw\" (UID: \"ceddb914-6af3-4417-861b-20ac241c337d\") " pod="hostpath-provisioner/csi-hostpathplugin-l4ktw" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.612230 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/ca687e44-9ae4-4469-9ccf-6ed82a75c235-node-bootstrap-token\") pod \"machine-config-server-6xhp5\" (UID: \"ca687e44-9ae4-4469-9ccf-6ed82a75c235\") " pod="openshift-machine-config-operator/machine-config-server-6xhp5" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.612278 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/ceddb914-6af3-4417-861b-20ac241c337d-csi-data-dir\") pod \"csi-hostpathplugin-l4ktw\" (UID: \"ceddb914-6af3-4417-861b-20ac241c337d\") " pod="hostpath-provisioner/csi-hostpathplugin-l4ktw" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.612352 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/ceddb914-6af3-4417-861b-20ac241c337d-registration-dir\") pod \"csi-hostpathplugin-l4ktw\" (UID: \"ceddb914-6af3-4417-861b-20ac241c337d\") " pod="hostpath-provisioner/csi-hostpathplugin-l4ktw" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.612273 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/ceddb914-6af3-4417-861b-20ac241c337d-socket-dir\") pod \"csi-hostpathplugin-l4ktw\" (UID: \"ceddb914-6af3-4417-861b-20ac241c337d\") " pod="hostpath-provisioner/csi-hostpathplugin-l4ktw" Feb 18 00:36:51 crc kubenswrapper[4791]: E0218 00:36:51.612398 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:52.112387457 +0000 UTC m=+153.680400727 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.612410 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/ceddb914-6af3-4417-861b-20ac241c337d-plugins-dir\") pod \"csi-hostpathplugin-l4ktw\" (UID: \"ceddb914-6af3-4417-861b-20ac241c337d\") " pod="hostpath-provisioner/csi-hostpathplugin-l4ktw" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.612657 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-t5878"] Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.614421 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/ceddb914-6af3-4417-861b-20ac241c337d-csi-data-dir\") pod \"csi-hostpathplugin-l4ktw\" (UID: \"ceddb914-6af3-4417-861b-20ac241c337d\") " pod="hostpath-provisioner/csi-hostpathplugin-l4ktw" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.615734 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/ca687e44-9ae4-4469-9ccf-6ed82a75c235-certs\") pod \"machine-config-server-6xhp5\" (UID: \"ca687e44-9ae4-4469-9ccf-6ed82a75c235\") " pod="openshift-machine-config-operator/machine-config-server-6xhp5" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.617483 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-t5rkn" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.619037 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-9cr2f"] Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.622921 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kn4jn\" (UniqueName: \"kubernetes.io/projected/3183b0f5-bf03-4265-a2e6-b05f05841663-kube-api-access-kn4jn\") pod \"dns-operator-744455d44c-2mrz5\" (UID: \"3183b0f5-bf03-4265-a2e6-b05f05841663\") " pod="openshift-dns-operator/dns-operator-744455d44c-2mrz5" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.623399 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/ca687e44-9ae4-4469-9ccf-6ed82a75c235-node-bootstrap-token\") pod \"machine-config-server-6xhp5\" (UID: \"ca687e44-9ae4-4469-9ccf-6ed82a75c235\") " pod="openshift-machine-config-operator/machine-config-server-6xhp5" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.624828 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hb7bm" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.654678 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-2mrz5" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.682679 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dcwvq\" (UniqueName: \"kubernetes.io/projected/ca687e44-9ae4-4469-9ccf-6ed82a75c235-kube-api-access-dcwvq\") pod \"machine-config-server-6xhp5\" (UID: \"ca687e44-9ae4-4469-9ccf-6ed82a75c235\") " pod="openshift-machine-config-operator/machine-config-server-6xhp5" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.688481 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c2cks\" (UniqueName: \"kubernetes.io/projected/ceddb914-6af3-4417-861b-20ac241c337d-kube-api-access-c2cks\") pod \"csi-hostpathplugin-l4ktw\" (UID: \"ceddb914-6af3-4417-861b-20ac241c337d\") " pod="hostpath-provisioner/csi-hostpathplugin-l4ktw" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.692011 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-l4ktw" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.712844 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:51 crc kubenswrapper[4791]: E0218 00:36:51.713232 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:52.213211036 +0000 UTC m=+153.781224206 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:51 crc kubenswrapper[4791]: W0218 00:36:51.713489 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5826b603_6616_49b5_a4d0_9dd63e715c9e.slice/crio-bef4ce783993ea1f0ff6423d0682de5932fbb16eb44edfded66ad664d2403ad3 WatchSource:0}: Error finding container bef4ce783993ea1f0ff6423d0682de5932fbb16eb44edfded66ad664d2403ad3: Status 404 returned error can't find the container with id bef4ce783993ea1f0ff6423d0682de5932fbb16eb44edfded66ad664d2403ad3 Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.716724 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7pmtt"] Feb 18 00:36:51 crc kubenswrapper[4791]: W0218 00:36:51.718903 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40569d9f_89af_4f68_855d_88d881e71de5.slice/crio-3123add067e5727491830565889c3add2e680bd63620ccb38de3cbe9cf6fa06e WatchSource:0}: Error finding container 3123add067e5727491830565889c3add2e680bd63620ccb38de3cbe9cf6fa06e: Status 404 returned error can't find the container with id 3123add067e5727491830565889c3add2e680bd63620ccb38de3cbe9cf6fa06e Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.718953 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4zz5m"] Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.721578 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-2m72p"] Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.815108 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:51 crc kubenswrapper[4791]: E0218 00:36:51.816353 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:52.316337712 +0000 UTC m=+153.884350882 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.842721 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29522910-zlz2k" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.919956 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:51 crc kubenswrapper[4791]: E0218 00:36:51.920053 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:52.420030977 +0000 UTC m=+153.988044137 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.920288 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:51 crc kubenswrapper[4791]: E0218 00:36:51.920753 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:52.420744559 +0000 UTC m=+153.988757729 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.966518 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-jhdhd"] Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.966568 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xhnwk"] Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.971657 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-6xhp5" Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.979619 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-2tcll"] Feb 18 00:36:51 crc kubenswrapper[4791]: I0218 00:36:51.989266 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-82mpq"] Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.021076 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:52 crc kubenswrapper[4791]: E0218 00:36:52.021302 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:52.521274409 +0000 UTC m=+154.089287579 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.021436 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:52 crc kubenswrapper[4791]: E0218 00:36:52.021725 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:52.521717144 +0000 UTC m=+154.089730314 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:52 crc kubenswrapper[4791]: W0218 00:36:52.075542 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podca687e44_9ae4_4469_9ccf_6ed82a75c235.slice/crio-051781918f13223643fbefea290b329e164d149a423ac50a4a2c4e442c6655c0 WatchSource:0}: Error finding container 051781918f13223643fbefea290b329e164d149a423ac50a4a2c4e442c6655c0: Status 404 returned error can't find the container with id 051781918f13223643fbefea290b329e164d149a423ac50a4a2c4e442c6655c0 Feb 18 00:36:52 crc kubenswrapper[4791]: W0218 00:36:52.101300 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6edc9cce_f90d_4bff_bc33_43ff4051944f.slice/crio-740307e1e7db09923542f803562abb956390c8afa895fa399d135ab4e8668706 WatchSource:0}: Error finding container 740307e1e7db09923542f803562abb956390c8afa895fa399d135ab4e8668706: Status 404 returned error can't find the container with id 740307e1e7db09923542f803562abb956390c8afa895fa399d135ab4e8668706 Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.122038 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:52 crc kubenswrapper[4791]: E0218 00:36:52.122180 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:52.62214178 +0000 UTC m=+154.190154950 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.122394 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:52 crc kubenswrapper[4791]: E0218 00:36:52.122741 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-02-18 00:36:52.622714039 +0000 UTC m=+154.190727209 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.223459 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:52 crc kubenswrapper[4791]: E0218 00:36:52.224010 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:52.723994944 +0000 UTC m=+154.292008114 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.237811 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-qj494" event={"ID":"cf6cb538-0c36-4140-a49f-2fcf19d46169","Type":"ContainerStarted","Data":"7f4b0b1363a286a0f4ab23ebb853673e3b266c8cb8adc632b318b878580e60f3"} Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.240210 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-jhdhd" event={"ID":"6edc9cce-f90d-4bff-bc33-43ff4051944f","Type":"ContainerStarted","Data":"740307e1e7db09923542f803562abb956390c8afa895fa399d135ab4e8668706"} Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.241762 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" event={"ID":"dc760ce4-d81e-4d1e-9341-1d8b40596f4c","Type":"ContainerStarted","Data":"c52bf3b8041eb179d87b05e9a2a9f7ee6f9dc8f453e6c7095c46e8d3a1d4ffd8"} Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.243578 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2m72p" event={"ID":"c1261ad9-b566-419e-ad9a-e7b361edd24a","Type":"ContainerStarted","Data":"dc80c75e631ddfe187b07f15ee9e80b1ac073885b8f92777fe678a5bd2e3cfc2"} Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.245170 4791 generic.go:334] "Generic (PLEG): container finished" podID="45cdb2a5-abaf-4f48-a61f-5ede25bd7bde" containerID="d16f27d0d703531378cb20e72c54e8fd9ca17405d329635b184d12e863a9fd35" exitCode=0 Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.245221 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-px7q8" event={"ID":"45cdb2a5-abaf-4f48-a61f-5ede25bd7bde","Type":"ContainerDied","Data":"d16f27d0d703531378cb20e72c54e8fd9ca17405d329635b184d12e863a9fd35"} Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.246842 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-tk98h" event={"ID":"5826b603-6616-49b5-a4d0-9dd63e715c9e","Type":"ContainerStarted","Data":"bef4ce783993ea1f0ff6423d0682de5932fbb16eb44edfded66ad664d2403ad3"} Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.247729 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-9cr2f" event={"ID":"40569d9f-89af-4f68-855d-88d881e71de5","Type":"ContainerStarted","Data":"3123add067e5727491830565889c3add2e680bd63620ccb38de3cbe9cf6fa06e"} Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.248970 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-rxhdk" event={"ID":"e1e9abd7-81f2-423d-8a79-c4102461680d","Type":"ContainerStarted","Data":"abb2279836c94fce9cfe9c6dbe90bc93be8872699939ccd46c6299035fa85538"} Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.248988 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-rxhdk" event={"ID":"e1e9abd7-81f2-423d-8a79-c4102461680d","Type":"ContainerStarted","Data":"a8ed8ca0b7f07f0fbcced222def52c425c713c64fcde17e6c6f9360cb9d3914f"} Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.250510 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-k8nrp" event={"ID":"e7603b81-f0fd-4f84-980d-672efe28d72f","Type":"ContainerStarted","Data":"5bbda6b0700a99e126a8391ebf3e3b3c7616cdeded8ddb4b281e8742026536e9"} Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.252143 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-w8nfn" event={"ID":"da348cdf-f9c8-4e7c-b462-7d4979dada11","Type":"ContainerStarted","Data":"29419d6772a182a34015dcbab5838bc82843f52d5fd7f483eb3941c4d24999db"} Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.252416 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-w8nfn" Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.253899 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7pmtt" event={"ID":"a0084382-ff2c-4c0d-8460-b0694790a78b","Type":"ContainerStarted","Data":"aea63a5a0c2a3af2b0baf1e6d4e03389c64385f664c96e791fcbfc3ef8a155d7"} Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.255511 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-rfkpk" event={"ID":"e0149f47-a655-434f-982e-b8872ea180c4","Type":"ContainerStarted","Data":"0b597c0dee2ca3bdf848a1915359932fa7493c26c957b74f5750fe2b459fe447"} Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.256774 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-dvs8h" event={"ID":"929ec2d3-2a0e-4031-9a2a-08b9744e2b40","Type":"ContainerStarted","Data":"7209686aed330226d930ac955881d5c083e85b845896d42b8ef9b606add357e3"} Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.259097 4791 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-8tsx6" event={"ID":"12d0d3a0-841c-4a06-aad2-c52c33800392","Type":"ContainerStarted","Data":"0ecf41174ac70d793054b9c921d8635b43398b756899087283011f563fd4b5aa"} Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.272655 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-82mpq" event={"ID":"7fa9bca6-1980-4a13-9c30-5d693c51b72c","Type":"ContainerStarted","Data":"9163b5dddaba2f71c83910db14a34c8622cb13c3cd8370d3ffc0eca2f4362149"} Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.273726 4791 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-w8nfn container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body= Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.273760 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-w8nfn" podUID="da348cdf-f9c8-4e7c-b462-7d4979dada11" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.320578 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xhnwk" event={"ID":"b06fac8f-e399-462f-9665-35781318a69d","Type":"ContainerStarted","Data":"4f107c6a6d94f86aa5fec4eb451d2f11b9a32314f901d7149816fb7fdd82a935"} Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.325738 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-xlh9r"] Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.334513 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dnxrh"] Feb 18 00:36:52 crc kubenswrapper[4791]: E0218 00:36:52.328232 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:52.828133114 +0000 UTC m=+154.396146274 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.327705 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.341440 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4zz5m" event={"ID":"668a019f-7e1c-4aca-a07c-3f22308a66c2","Type":"ContainerStarted","Data":"ce7beb95a906a9876ca676d0150df0ca08c34c4823344bf0ef8a654baac543e6"} Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.343630 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-t5878" event={"ID":"523f9b10-74b8-45bb-b400-054b069286dc","Type":"ContainerStarted","Data":"b411828438e95e7565e715e617e9f75eee5d04a13b02c38ee3687ebb9c6748fe"} Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.347595 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-rwmnp" event={"ID":"f5c71b3e-957e-466a-97c6-b114ee0eea13","Type":"ContainerStarted","Data":"37844becc56383621c38293c3e32b1284deb086668eb3e8a2be4e602ffd5460b"} Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.356389 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2tcll" event={"ID":"a7563a68-6d05-4beb-8d19-ab941578e67e","Type":"ContainerStarted","Data":"f6afe6ded1b9caa15c0823e1dd9934653de2475ea6fc808a30aedbe307972d1d"} Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.359825 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-tlnpm"] Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.365644 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-82c2d" event={"ID":"a4f1858f-9974-4098-a264-b981c587623b","Type":"ContainerStarted","Data":"31e284b4f0d287672cf739d16d1c879419c22a657b47742c8340584618f16206"} Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.365680 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-82c2d" event={"ID":"a4f1858f-9974-4098-a264-b981c587623b","Type":"ContainerStarted","Data":"28981b65d13ae9c79a43ec05f3b94a8aa936a2391bf3068b402dd54b09c01b2f"} Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.373653 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-r4t2n" event={"ID":"d465afa1-5937-4edd-a90f-27cec9705554","Type":"ContainerStarted","Data":"ba010cfa227cca4a3670c4e2bfab456a3e0ece10e899d87e337104f6c35f3013"} Feb 18 00:36:52 crc 
kubenswrapper[4791]: I0218 00:36:52.374274 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-r4t2n" Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.386346 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-6xhp5" event={"ID":"ca687e44-9ae4-4469-9ccf-6ed82a75c235","Type":"ContainerStarted","Data":"051781918f13223643fbefea290b329e164d149a423ac50a4a2c4e442c6655c0"} Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.391676 4791 generic.go:334] "Generic (PLEG): container finished" podID="8eb994e6-6e8b-4d0f-84f2-92333f1571c1" containerID="110044f8246c477433d8468487c7bc27afdb484a420dd01530e25cd5e5dcb5b5" exitCode=0 Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.391815 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" event={"ID":"8eb994e6-6e8b-4d0f-84f2-92333f1571c1","Type":"ContainerDied","Data":"110044f8246c477433d8468487c7bc27afdb484a420dd01530e25cd5e5dcb5b5"} Feb 18 00:36:52 crc kubenswrapper[4791]: E0218 00:36:52.435636 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:52.935603663 +0000 UTC m=+154.503616843 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.437378 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.437662 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:52 crc kubenswrapper[4791]: E0218 00:36:52.438064 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:52.938055355 +0000 UTC m=+154.506068525 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.461806 4791 patch_prober.go:28] interesting pod/console-operator-58897d9998-r4t2n container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/readyz\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body= Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.461846 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-r4t2n" podUID="d465afa1-5937-4edd-a90f-27cec9705554" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.9:8443/readyz\": dial tcp 10.217.0.9:8443: connect: connection refused" Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.540099 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:52 crc kubenswrapper[4791]: E0218 00:36:52.541370 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:53.041340586 +0000 UTC m=+154.609353756 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.587379 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-v7s8c" podStartSLOduration=127.58736 podStartE2EDuration="2m7.58736s" podCreationTimestamp="2026-02-18 00:34:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:52.58313282 +0000 UTC m=+154.151145990" watchObservedRunningTime="2026-02-18 00:36:52.58736 +0000 UTC m=+154.155373170" Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.645809 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:52 crc kubenswrapper[4791]: E0218 00:36:52.646106 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:53.146094145 +0000 UTC m=+154.714107315 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.669845 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-rwmnp" podStartSLOduration=126.669830422 podStartE2EDuration="2m6.669830422s" podCreationTimestamp="2026-02-18 00:34:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:52.669674707 +0000 UTC m=+154.237687887" watchObservedRunningTime="2026-02-18 00:36:52.669830422 +0000 UTC m=+154.237843592" Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.676669 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-plj59"] Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.687945 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-vlrzr"] Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.715608 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-fpmgd"] Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.722336 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7swxh"] Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.740804 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-fsfrp"] Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.743498 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-pruner-29522880-928kh" podStartSLOduration=127.743222363 podStartE2EDuration="2m7.743222363s" podCreationTimestamp="2026-02-18 00:34:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:52.742179368 +0000 UTC m=+154.310192558" watchObservedRunningTime="2026-02-18 00:36:52.743222363 +0000 UTC m=+154.311235573" Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.746666 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:52 crc kubenswrapper[4791]: E0218 00:36:52.746996 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:53.246982227 +0000 UTC m=+154.814995397 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.780707 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-rxhdk" podStartSLOduration=127.780690754 podStartE2EDuration="2m7.780690754s" podCreationTimestamp="2026-02-18 00:34:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:52.777537269 +0000 UTC m=+154.345550439" watchObservedRunningTime="2026-02-18 00:36:52.780690754 +0000 UTC m=+154.348703924" Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.785614 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-2ph78"] Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.789994 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9n2vw"] Feb 18 00:36:52 crc kubenswrapper[4791]: W0218 00:36:52.790709 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod83ea23eb_c756_4262_a2d7_218bc3fb25ac.slice/crio-f6407c97193ed649b5c43ffe870eec4e49620b8b767cc2196420d02af5563df8 WatchSource:0}: Error finding container f6407c97193ed649b5c43ffe870eec4e49620b8b767cc2196420d02af5563df8: Status 404 returned error can't find the container with id f6407c97193ed649b5c43ffe870eec4e49620b8b767cc2196420d02af5563df8 Feb 18 00:36:52 crc kubenswrapper[4791]: W0218 00:36:52.797690 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd8671eab_3f50_466b_b6aa_f20c971e1b7e.slice/crio-928f835298496c3a4bb6e58127ece5b6aae728f5a796e65b02527546c26d0aad WatchSource:0}: Error finding container 928f835298496c3a4bb6e58127ece5b6aae728f5a796e65b02527546c26d0aad: Status 404 returned error can't find the container with id 928f835298496c3a4bb6e58127ece5b6aae728f5a796e65b02527546c26d0aad Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.848753 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:52 crc kubenswrapper[4791]: E0218 00:36:52.849293 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:53.349280176 +0000 UTC m=+154.917293346 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:52 crc kubenswrapper[4791]: W0218 00:36:52.879103 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5ec919db_352a_41eb_9564_dfc8add29680.slice/crio-38b79c8fae93c2d293a7fc874e8ea24af0f22287920a61b07e874d109b922ed8 WatchSource:0}: Error finding container 38b79c8fae93c2d293a7fc874e8ea24af0f22287920a61b07e874d109b922ed8: Status 404 returned error can't find the container with id 38b79c8fae93c2d293a7fc874e8ea24af0f22287920a61b07e874d109b922ed8 Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.893636 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-2mrz5"] Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.929799 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-pxzzf"] Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.932070 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hb7bm"] Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.941467 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-l4ktw"] Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.944608 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-w8nfn" podStartSLOduration=127.944589933 podStartE2EDuration="2m7.944589933s" podCreationTimestamp="2026-02-18 00:34:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:52.944348475 +0000 UTC m=+154.512361645" watchObservedRunningTime="2026-02-18 00:36:52.944589933 +0000 UTC m=+154.512603103" Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.950749 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:52 crc kubenswrapper[4791]: E0218 00:36:52.950955 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:53.450922052 +0000 UTC m=+155.018935222 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.951016 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:52 crc kubenswrapper[4791]: E0218 00:36:52.951320 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:53.451307755 +0000 UTC m=+155.019320915 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:52 crc kubenswrapper[4791]: I0218 00:36:52.953541 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29522910-zlz2k"] Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.043143 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-t5rkn"] Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.051845 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:53 crc kubenswrapper[4791]: E0218 00:36:53.052339 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:53.552318771 +0000 UTC m=+155.120331951 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.138449 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-r4t2n" podStartSLOduration=128.138432763 podStartE2EDuration="2m8.138432763s" podCreationTimestamp="2026-02-18 00:34:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:53.137957218 +0000 UTC m=+154.705970388" watchObservedRunningTime="2026-02-18 00:36:53.138432763 +0000 UTC m=+154.706445933" Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.153530 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:53 crc kubenswrapper[4791]: E0218 00:36:53.153848 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:53.653834903 +0000 UTC m=+155.221848073 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.254851 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:53 crc kubenswrapper[4791]: E0218 00:36:53.255143 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:53.755116658 +0000 UTC m=+155.323129838 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.356819 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:53 crc kubenswrapper[4791]: E0218 00:36:53.359218 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:53.859202996 +0000 UTC m=+155.427216166 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.422114 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-jhdhd" event={"ID":"6edc9cce-f90d-4bff-bc33-43ff4051944f","Type":"ContainerStarted","Data":"76c1a0180d01df3faa911cf2d34ee9313a80a02adaabdd1db10fa1271c7769e1"} Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.429530 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7swxh" event={"ID":"d8671eab-3f50-466b-b6aa-f20c971e1b7e","Type":"ContainerStarted","Data":"ce18945eda2df539d5ec1c96f71d82d9de3bce6f6a16f25a64174cede5081b1f"} Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.429576 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7swxh" event={"ID":"d8671eab-3f50-466b-b6aa-f20c971e1b7e","Type":"ContainerStarted","Data":"928f835298496c3a4bb6e58127ece5b6aae728f5a796e65b02527546c26d0aad"} Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.434696 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" event={"ID":"dc760ce4-d81e-4d1e-9341-1d8b40596f4c","Type":"ContainerStarted","Data":"3e32f61575b994c217f8f754ac5b33bc89fc89650385d8ec602e00bad0bf6947"} Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.435303 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.439399 4791 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-gxtkn container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get 
\"https://10.217.0.15:6443/healthz\": dial tcp 10.217.0.15:6443: connect: connection refused" start-of-body= Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.439433 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" podUID="dc760ce4-d81e-4d1e-9341-1d8b40596f4c" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.15:6443/healthz\": dial tcp 10.217.0.15:6443: connect: connection refused" Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.439978 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-k8nrp" event={"ID":"e7603b81-f0fd-4f84-980d-672efe28d72f","Type":"ContainerStarted","Data":"900940cf0080655e9c407481812a912be17ca544609c780ab86fbcd6959d072f"} Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.440014 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-k8nrp" event={"ID":"e7603b81-f0fd-4f84-980d-672efe28d72f","Type":"ContainerStarted","Data":"7f318fe46a3a43ac617c2e3d8be629290c3dd3a5cbc88743cd7e27df06fdfd92"} Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.440571 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-k8nrp" Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.444768 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-8tsx6" event={"ID":"12d0d3a0-841c-4a06-aad2-c52c33800392","Type":"ContainerStarted","Data":"8045d89bcaa0cefa9349059c6b4f9669deeb58caa73ce8849cd3ed0ff345142f"} Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.444802 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-8tsx6" event={"ID":"12d0d3a0-841c-4a06-aad2-c52c33800392","Type":"ContainerStarted","Data":"e01ba20e36ff13f05790f9ebbf4d230034f225659a1a016120cb1b76c3bfe702"} Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.463780 4791 generic.go:334] "Generic (PLEG): container finished" podID="929ec2d3-2a0e-4031-9a2a-08b9744e2b40" containerID="59d16fdaf9516496c32a70b1e292879a9d69e1f1b1032fff8ff1bdd5eca49ef5" exitCode=0 Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.463867 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-dvs8h" event={"ID":"929ec2d3-2a0e-4031-9a2a-08b9744e2b40","Type":"ContainerDied","Data":"59d16fdaf9516496c32a70b1e292879a9d69e1f1b1032fff8ff1bdd5eca49ef5"} Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.468736 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:53 crc kubenswrapper[4791]: E0218 00:36:53.469029 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:53.969015513 +0000 UTC m=+155.537028683 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.491079 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-9cr2f" event={"ID":"40569d9f-89af-4f68-855d-88d881e71de5","Type":"ContainerStarted","Data":"c5466661bc6f7b81d9eda3c64ef39029b3f6ef86b56ff2aeab46e67a45fe4121"} Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.494784 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fpmgd" event={"ID":"73644233-2e9a-458d-8513-f56dddce1c55","Type":"ContainerStarted","Data":"a6a8237fdd9804c4cbda605ccad86ff114e98a3eda9f8a997b4a72a014fff732"} Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.505589 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-82c2d" podStartSLOduration=127.505573194 podStartE2EDuration="2m7.505573194s" podCreationTimestamp="2026-02-18 00:34:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:53.504485468 +0000 UTC m=+155.072498638" watchObservedRunningTime="2026-02-18 00:36:53.505573194 +0000 UTC m=+155.073586354" Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.506240 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2tcll" event={"ID":"a7563a68-6d05-4beb-8d19-ab941578e67e","Type":"ContainerStarted","Data":"7bdf11fd490f89e80a67c782c8dd5ea44296e4b4d961a83c5536c4bf654fe6c8"} Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.535116 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-px7q8" event={"ID":"45cdb2a5-abaf-4f48-a61f-5ede25bd7bde","Type":"ContainerStarted","Data":"82b414e98de381a0fed314fc9ffa300cec7001e2d70f10ee44f4d81d85c36777"} Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.569951 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.570977 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hb7bm" event={"ID":"85eab95a-ee70-497d-a22d-1cf047cb906a","Type":"ContainerStarted","Data":"0d9f0c39d31ef794d92c7d19e5883a70c34913b98d73bc010cd459d1f1e504b1"} Feb 18 00:36:53 crc kubenswrapper[4791]: E0218 00:36:53.572801 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-02-18 00:36:54.07278807 +0000 UTC m=+155.640801240 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.575138 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xhnwk" event={"ID":"b06fac8f-e399-462f-9665-35781318a69d","Type":"ContainerStarted","Data":"3d67c04436ec448b8342c892889493ef651b1f92a29a75dbe63945157f5096be"} Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.579974 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" event={"ID":"8eb994e6-6e8b-4d0f-84f2-92333f1571c1","Type":"ContainerStarted","Data":"42e2456f3d1a3f203b46da57749234cb8f811fc5468d2ad2b1ff570ba58bfd7b"} Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.597356 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-2ph78" event={"ID":"302ddff9-9052-4d4f-9b4b-b0c0b1b3a638","Type":"ContainerStarted","Data":"5590feeb189be3708c1514ebed7822b22519082ee8e7848474f867529898bcda"} Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.599517 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-t5878" event={"ID":"523f9b10-74b8-45bb-b400-054b069286dc","Type":"ContainerStarted","Data":"3293320f2b0f8706add676aac60a00485553963193f60f14d89a7540da276367"} Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.638510 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-8tsx6" podStartSLOduration=128.638493787 podStartE2EDuration="2m8.638493787s" podCreationTimestamp="2026-02-18 00:34:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:53.633968616 +0000 UTC m=+155.201981786" watchObservedRunningTime="2026-02-18 00:36:53.638493787 +0000 UTC m=+155.206506957" Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.670820 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:53 crc kubenswrapper[4791]: E0218 00:36:53.671921 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:54.171903433 +0000 UTC m=+155.739916603 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.674292 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-qj494" event={"ID":"cf6cb538-0c36-4140-a49f-2fcf19d46169","Type":"ContainerStarted","Data":"11f06c8a586e492e7a3e01f9c9df18b20aee43ed871abe3e746bb2ada63c9ad3"} Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.675747 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-qj494" Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.684617 4791 patch_prober.go:28] interesting pod/downloads-7954f5f757-qj494 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.684682 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-qj494" podUID="cf6cb538-0c36-4140-a49f-2fcf19d46169" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.687290 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-t5rkn" event={"ID":"8ef3cbf2-9429-44d5-a7bd-fa1ef31707cc","Type":"ContainerStarted","Data":"f6769771fdc1b3a5b6a0de07cc59704543cbded8dc2090c1906d9c1d187bb214"} Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.711721 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-px7q8" podStartSLOduration=127.711705342 podStartE2EDuration="2m7.711705342s" podCreationTimestamp="2026-02-18 00:34:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:53.709426406 +0000 UTC m=+155.277439576" watchObservedRunningTime="2026-02-18 00:36:53.711705342 +0000 UTC m=+155.279718512" Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.712237 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xhnwk" podStartSLOduration=128.712231699 podStartE2EDuration="2m8.712231699s" podCreationTimestamp="2026-02-18 00:34:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:53.675992508 +0000 UTC m=+155.244005678" watchObservedRunningTime="2026-02-18 00:36:53.712231699 +0000 UTC m=+155.280244869" Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.732604 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-plj59" event={"ID":"d06d8fca-e20d-4b6b-9466-8c612ec8cc5f","Type":"ContainerStarted","Data":"687fd3256454c31d6944134a5ed3ecce5a11b684076625c7b19a9c8ebcb77ae5"} Feb 18 
00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.733534 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-plj59" Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.735800 4791 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-plj59 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/healthz\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body= Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.735858 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-plj59" podUID="d06d8fca-e20d-4b6b-9466-8c612ec8cc5f" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.31:8080/healthz\": dial tcp 10.217.0.31:8080: connect: connection refused" Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.738526 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-6xhp5" event={"ID":"ca687e44-9ae4-4469-9ccf-6ed82a75c235","Type":"ContainerStarted","Data":"b3760719e5119e916349b13272512899da03e6d6963cb9e19ad562d09d0fe5bd"} Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.766665 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2m72p" event={"ID":"c1261ad9-b566-419e-ad9a-e7b361edd24a","Type":"ContainerStarted","Data":"c89582836db97f7126a9e93e286edad8c17dd36f708c5681bd218f4e6a9239c9"} Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.767828 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2m72p" Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.769749 4791 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-2m72p container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/healthz\": dial tcp 10.217.0.12:8443: connect: connection refused" start-of-body= Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.769801 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2m72p" podUID="c1261ad9-b566-419e-ad9a-e7b361edd24a" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.12:8443/healthz\": dial tcp 10.217.0.12:8443: connect: connection refused" Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.773602 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:53 crc kubenswrapper[4791]: E0218 00:36:53.773922 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:54.273909302 +0000 UTC m=+155.841922472 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.776614 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" podStartSLOduration=128.776599511 podStartE2EDuration="2m8.776599511s" podCreationTimestamp="2026-02-18 00:34:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:53.77295219 +0000 UTC m=+155.340965370" watchObservedRunningTime="2026-02-18 00:36:53.776599511 +0000 UTC m=+155.344612691" Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.802096 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-rfkpk" event={"ID":"e0149f47-a655-434f-982e-b8872ea180c4","Type":"ContainerStarted","Data":"e4b83770c4885dec0cfde2d95a1451aa026cef89a0219226f5709df510feb200"} Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.802141 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-rfkpk" event={"ID":"e0149f47-a655-434f-982e-b8872ea180c4","Type":"ContainerStarted","Data":"77fbfa6bd34c1588cb57a906c1eaaa48ad4f2979a31a1360629f9f52a17f26eb"} Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.837779 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-jhdhd" podStartSLOduration=127.837759427 podStartE2EDuration="2m7.837759427s" podCreationTimestamp="2026-02-18 00:34:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:53.83544601 +0000 UTC m=+155.403459180" watchObservedRunningTime="2026-02-18 00:36:53.837759427 +0000 UTC m=+155.405772597" Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.875343 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:53 crc kubenswrapper[4791]: E0218 00:36:53.875472 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:54.375453025 +0000 UTC m=+155.943466195 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.875703 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:53 crc kubenswrapper[4791]: E0218 00:36:53.876041 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:54.376032585 +0000 UTC m=+155.944045755 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.901142 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7pmtt" event={"ID":"a0084382-ff2c-4c0d-8460-b0694790a78b","Type":"ContainerStarted","Data":"a6833cbc8f5b3d609e02da0389bec8e44db2dc074fa272246a9aa2ee33057a63"} Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.915489 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7swxh" podStartSLOduration=128.915456031 podStartE2EDuration="2m8.915456031s" podCreationTimestamp="2026-02-18 00:34:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:53.902060207 +0000 UTC m=+155.470073377" watchObservedRunningTime="2026-02-18 00:36:53.915456031 +0000 UTC m=+155.483469201" Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.932358 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-vlrzr" event={"ID":"83ea23eb-c756-4262-a2d7-218bc3fb25ac","Type":"ContainerStarted","Data":"f6407c97193ed649b5c43ffe870eec4e49620b8b767cc2196420d02af5563df8"} Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.955576 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4zz5m" event={"ID":"668a019f-7e1c-4aca-a07c-3f22308a66c2","Type":"ContainerStarted","Data":"bdbbfe9758aa9c3b89f189fd0202be5f2a3b1c9ef0476d04d97fc03b2bedbb50"} Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.978280 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:53 crc kubenswrapper[4791]: E0218 00:36:53.979492 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:54.479477171 +0000 UTC m=+156.047490331 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:53 crc kubenswrapper[4791]: I0218 00:36:53.980662 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-l4ktw" event={"ID":"ceddb914-6af3-4417-861b-20ac241c337d","Type":"ContainerStarted","Data":"093c183964b6f622268e0dc1be0f9da887c8a81afcbaaf583e4f973665d7bec1"} Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.021414 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9n2vw" event={"ID":"5ec919db-352a-41eb-9564-dfc8add29680","Type":"ContainerStarted","Data":"38b79c8fae93c2d293a7fc874e8ea24af0f22287920a61b07e874d109b922ed8"} Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.050899 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-tk98h" event={"ID":"5826b603-6616-49b5-a4d0-9dd63e715c9e","Type":"ContainerStarted","Data":"efa60d570903083b3efb91baca22f6f029dd61c9939a7d97dbceaf0c7d35031b"} Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.063030 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-9cr2f" podStartSLOduration=128.062999277 podStartE2EDuration="2m8.062999277s" podCreationTimestamp="2026-02-18 00:34:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:54.06036993 +0000 UTC m=+155.628383100" watchObservedRunningTime="2026-02-18 00:36:54.062999277 +0000 UTC m=+155.631012447" Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.063293 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-k8nrp" podStartSLOduration=128.063289397 podStartE2EDuration="2m8.063289397s" podCreationTimestamp="2026-02-18 00:34:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:54.009411142 +0000 UTC m=+155.577424312" watchObservedRunningTime="2026-02-18 00:36:54.063289397 +0000 UTC m=+155.631302567" Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.064380 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29522910-zlz2k" 
event={"ID":"eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02","Type":"ContainerStarted","Data":"1701713f5ff218a737090ca561a699ae2adeeacd4384a68fee2473c6045c565f"} Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.075875 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-2mrz5" event={"ID":"3183b0f5-bf03-4265-a2e6-b05f05841663","Type":"ContainerStarted","Data":"734253eff578b8751c103eb1833639d458af066dd9ad29f2cc10459e173eac7e"} Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.081531 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-fsfrp" event={"ID":"c6450a7c-d805-4e80-aee0-ae49750d8dc7","Type":"ContainerStarted","Data":"9eb31e9ea0796e9e66b4bb40951da6122aa17e9ef07f9e4b8bc53500e72653c1"} Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.082220 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:54 crc kubenswrapper[4791]: E0218 00:36:54.083767 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:54.583750375 +0000 UTC m=+156.151763545 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.091385 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-82mpq" event={"ID":"7fa9bca6-1980-4a13-9c30-5d693c51b72c","Type":"ContainerStarted","Data":"a63bc9069ea6b9db56ad342c65b3d81f3e041da67f19337b021095d4b30c5c23"} Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.092238 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-82mpq" Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.101474 4791 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-82mpq container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.18:5443/healthz\": dial tcp 10.217.0.18:5443: connect: connection refused" start-of-body= Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.101525 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-82mpq" podUID="7fa9bca6-1980-4a13-9c30-5d693c51b72c" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.18:5443/healthz\": dial tcp 10.217.0.18:5443: connect: connection refused" Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.104136 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2m72p" podStartSLOduration=128.104125699 podStartE2EDuration="2m8.104125699s" podCreationTimestamp="2026-02-18 00:34:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:54.101605116 +0000 UTC m=+155.669618286" watchObservedRunningTime="2026-02-18 00:36:54.104125699 +0000 UTC m=+155.672138869" Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.117836 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-pxzzf" event={"ID":"6f0f9444-6d63-4e2b-b63e-eb14589c5fdb","Type":"ContainerStarted","Data":"9b739b7965bdf359938ccf64085a6f2835a04f96163c6be02cd6d9e5d94ab44f"} Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.126545 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-rfkpk" podStartSLOduration=128.126530351 podStartE2EDuration="2m8.126530351s" podCreationTimestamp="2026-02-18 00:34:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:54.126393148 +0000 UTC m=+155.694406318" watchObservedRunningTime="2026-02-18 00:36:54.126530351 +0000 UTC m=+155.694543521" Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.132894 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xlh9r" event={"ID":"86198f6e-9eb6-4058-bf71-543f8283d54c","Type":"ContainerStarted","Data":"a4261b1e5cf901d19eca869ffa0a29d9e2aea647d8636f55f048cdf7262fd729"} Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.132972 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xlh9r" event={"ID":"86198f6e-9eb6-4058-bf71-543f8283d54c","Type":"ContainerStarted","Data":"024cef803460b46762fc687d3152906522f52f3ef0bda86bcb39c69dcfaba87f"} Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.141201 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dnxrh" event={"ID":"0c4d0046-4e3a-45c8-9d0c-185714e546d0","Type":"ContainerStarted","Data":"b6bffae275e250a5c2b154710c1c6e3a1b0c0d0b710b7acf2c1713aa4632ca7d"} Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.141240 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dnxrh" event={"ID":"0c4d0046-4e3a-45c8-9d0c-185714e546d0","Type":"ContainerStarted","Data":"b66c5511c07259ace47e883f87d3372d948a27938bba8a115ca57bbbe833acd4"} Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.141834 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dnxrh" Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.147479 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-tlnpm" event={"ID":"58de0ec6-b12b-4dcb-b0cb-95f2f786467b","Type":"ContainerStarted","Data":"c0ada2d3286dcb47f0ac076c6a2d2e6d20b4a105afd231f355ad0f6b49a59982"} Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.147525 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-tlnpm" 
event={"ID":"58de0ec6-b12b-4dcb-b0cb-95f2f786467b","Type":"ContainerStarted","Data":"3cffd6fd15fb20586f6b17035294a18f7f6703313b4e0d1ef57443db0d36f5cb"} Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.152779 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-w8nfn" Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.156018 4791 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-dnxrh container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" start-of-body= Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.156099 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dnxrh" podUID="0c4d0046-4e3a-45c8-9d0c-185714e546d0" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.159231 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-r4t2n" Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.170744 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-6xhp5" podStartSLOduration=6.170727106 podStartE2EDuration="6.170727106s" podCreationTimestamp="2026-02-18 00:36:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:54.170013642 +0000 UTC m=+155.738026812" watchObservedRunningTime="2026-02-18 00:36:54.170727106 +0000 UTC m=+155.738740286" Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.183723 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:54 crc kubenswrapper[4791]: E0218 00:36:54.185203 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:54.685181595 +0000 UTC m=+156.253194765 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.192679 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4zz5m" podStartSLOduration=129.192664993 podStartE2EDuration="2m9.192664993s" podCreationTimestamp="2026-02-18 00:34:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:54.190942925 +0000 UTC m=+155.758956095" watchObservedRunningTime="2026-02-18 00:36:54.192664993 +0000 UTC m=+155.760678163" Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.242092 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-qj494" podStartSLOduration=129.242076579 podStartE2EDuration="2m9.242076579s" podCreationTimestamp="2026-02-18 00:34:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:54.218614122 +0000 UTC m=+155.786627292" watchObservedRunningTime="2026-02-18 00:36:54.242076579 +0000 UTC m=+155.810089749" Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.285935 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:54 crc kubenswrapper[4791]: E0218 00:36:54.286919 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:54.786907214 +0000 UTC m=+156.354920384 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.299222 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-plj59" podStartSLOduration=128.299205032 podStartE2EDuration="2m8.299205032s" podCreationTimestamp="2026-02-18 00:34:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:54.298422715 +0000 UTC m=+155.866435885" watchObservedRunningTime="2026-02-18 00:36:54.299205032 +0000 UTC m=+155.867218202" Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.299754 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7pmtt" podStartSLOduration=129.29974846 podStartE2EDuration="2m9.29974846s" podCreationTimestamp="2026-02-18 00:34:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:54.241113518 +0000 UTC m=+155.809126688" watchObservedRunningTime="2026-02-18 00:36:54.29974846 +0000 UTC m=+155.867761630" Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.313598 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-tk98h" Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.332050 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-tk98h" podStartSLOduration=129.332035569 podStartE2EDuration="2m9.332035569s" podCreationTimestamp="2026-02-18 00:34:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:54.329309468 +0000 UTC m=+155.897322638" watchObservedRunningTime="2026-02-18 00:36:54.332035569 +0000 UTC m=+155.900048739" Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.337080 4791 patch_prober.go:28] interesting pod/router-default-5444994796-tk98h container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 18 00:36:54 crc kubenswrapper[4791]: [-]has-synced failed: reason withheld Feb 18 00:36:54 crc kubenswrapper[4791]: [+]process-running ok Feb 18 00:36:54 crc kubenswrapper[4791]: healthz check failed Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.337125 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tk98h" podUID="5826b603-6616-49b5-a4d0-9dd63e715c9e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.375296 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-82mpq" podStartSLOduration=128.375280462 podStartE2EDuration="2m8.375280462s" 
podCreationTimestamp="2026-02-18 00:34:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:54.374749414 +0000 UTC m=+155.942762584" watchObservedRunningTime="2026-02-18 00:36:54.375280462 +0000 UTC m=+155.943293632" Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.387229 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:54 crc kubenswrapper[4791]: E0218 00:36:54.387528 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:54.887510596 +0000 UTC m=+156.455523786 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.429193 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-fsfrp" podStartSLOduration=6.429178416 podStartE2EDuration="6.429178416s" podCreationTimestamp="2026-02-18 00:36:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:54.42748571 +0000 UTC m=+155.995498880" watchObservedRunningTime="2026-02-18 00:36:54.429178416 +0000 UTC m=+155.997191586" Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.470840 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-tlnpm" podStartSLOduration=128.470824096 podStartE2EDuration="2m8.470824096s" podCreationTimestamp="2026-02-18 00:34:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:54.464549258 +0000 UTC m=+156.032562418" watchObservedRunningTime="2026-02-18 00:36:54.470824096 +0000 UTC m=+156.038837266" Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.491034 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:54 crc kubenswrapper[4791]: E0218 00:36:54.495365 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-02-18 00:36:54.995351399 +0000 UTC m=+156.563364569 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.515836 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xlh9r" podStartSLOduration=129.515818906 podStartE2EDuration="2m9.515818906s" podCreationTimestamp="2026-02-18 00:34:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:54.51351192 +0000 UTC m=+156.081525090" watchObservedRunningTime="2026-02-18 00:36:54.515818906 +0000 UTC m=+156.083832076" Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.580612 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dnxrh" podStartSLOduration=128.580594532 podStartE2EDuration="2m8.580594532s" podCreationTimestamp="2026-02-18 00:34:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:54.553445353 +0000 UTC m=+156.121458523" watchObservedRunningTime="2026-02-18 00:36:54.580594532 +0000 UTC m=+156.148607702" Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.581971 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29522910-zlz2k" podStartSLOduration=129.581964487 podStartE2EDuration="2m9.581964487s" podCreationTimestamp="2026-02-18 00:34:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:54.580447597 +0000 UTC m=+156.148460767" watchObservedRunningTime="2026-02-18 00:36:54.581964487 +0000 UTC m=+156.149977657" Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.596469 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:54 crc kubenswrapper[4791]: E0218 00:36:54.596816 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:55.096801589 +0000 UTC m=+156.664814759 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.699142 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:54 crc kubenswrapper[4791]: E0218 00:36:54.699617 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:55.199602614 +0000 UTC m=+156.767615784 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.800783 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:54 crc kubenswrapper[4791]: E0218 00:36:54.800908 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:55.300890149 +0000 UTC m=+156.868903319 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.801135 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:54 crc kubenswrapper[4791]: E0218 00:36:54.801414 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:55.301404525 +0000 UTC m=+156.869417695 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:54 crc kubenswrapper[4791]: I0218 00:36:54.901981 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:54 crc kubenswrapper[4791]: E0218 00:36:54.902348 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:55.402333098 +0000 UTC m=+156.970346268 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.003030 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:55 crc kubenswrapper[4791]: E0218 00:36:55.003456 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:55.503440297 +0000 UTC m=+157.071453467 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.104647 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:55 crc kubenswrapper[4791]: E0218 00:36:55.105044 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:55.605025553 +0000 UTC m=+157.173038723 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.152368 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-dvs8h" event={"ID":"929ec2d3-2a0e-4031-9a2a-08b9744e2b40","Type":"ContainerStarted","Data":"d213474e295357695171d0441ecf5930f4b50833c139fafec6a76cf16777b3e4"} Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.152502 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-dvs8h" Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.154099 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hb7bm" event={"ID":"85eab95a-ee70-497d-a22d-1cf047cb906a","Type":"ContainerStarted","Data":"5c4ee056de9fcac3af3a43320d6a85b07df2aa06ca5fca9e09d34f7807bcbc88"} Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.155725 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-vlrzr" event={"ID":"83ea23eb-c756-4262-a2d7-218bc3fb25ac","Type":"ContainerStarted","Data":"cfea89f23b06547a4763db586d47d1d095fd5e8a68246955a0f1eb8209863cec"} Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.155766 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-vlrzr" event={"ID":"83ea23eb-c756-4262-a2d7-218bc3fb25ac","Type":"ContainerStarted","Data":"87608e032e5d1b111e123aba1aa0f75a48e945c6d9280b30dc41d7c4c22e234d"} Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.155862 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-vlrzr" Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.160921 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2tcll" event={"ID":"a7563a68-6d05-4beb-8d19-ab941578e67e","Type":"ContainerStarted","Data":"0108ca4217b67f0b9f047d7f26d52dbda9db042467b08af79b1a600ff87ebc04"} Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.162406 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-t5rkn" event={"ID":"8ef3cbf2-9429-44d5-a7bd-fa1ef31707cc","Type":"ContainerStarted","Data":"d0c9ae858c4f99c1dabdc09b613208a32257a0d24ac8c94a5e6edef224b4e4fa"} Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.162624 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-t5rkn" Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.163758 4791 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-t5rkn container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused" start-of-body= Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.163792 4791 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-t5rkn" podUID="8ef3cbf2-9429-44d5-a7bd-fa1ef31707cc" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused" Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.165686 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-2mrz5" event={"ID":"3183b0f5-bf03-4265-a2e6-b05f05841663","Type":"ContainerStarted","Data":"cf8e4c9268d288834d4d400afd30c9a9ff51db863cae804d923390c47f716380"} Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.165739 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-2mrz5" event={"ID":"3183b0f5-bf03-4265-a2e6-b05f05841663","Type":"ContainerStarted","Data":"dd23ad0101602aa073386347f37c8165ab28590820c2a0b70ebd06e45af8ea19"} Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.167870 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xlh9r" event={"ID":"86198f6e-9eb6-4058-bf71-543f8283d54c","Type":"ContainerStarted","Data":"67456a1808126cc96044a56d0789b1b50b4468438d9454116dd79e71acb25729"} Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.169933 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-l4ktw" event={"ID":"ceddb914-6af3-4417-861b-20ac241c337d","Type":"ContainerStarted","Data":"f11618f7d478a0730ba8cac7703f297d21d01572bba107586396536bf88a1b32"} Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.171399 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-fsfrp" event={"ID":"c6450a7c-d805-4e80-aee0-ae49750d8dc7","Type":"ContainerStarted","Data":"82e2095d9b07be9dcd9687a9c495e80dfde62a36740bb1cd9692cd6ff51e08d5"} Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.173275 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fpmgd" event={"ID":"73644233-2e9a-458d-8513-f56dddce1c55","Type":"ContainerStarted","Data":"95f9a0463e117f9eac7370f2c7af990326207396d69da5fc6d4c600900cdd7db"} Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.173302 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fpmgd" event={"ID":"73644233-2e9a-458d-8513-f56dddce1c55","Type":"ContainerStarted","Data":"2c8fb8a22091b4e27ac4431649dd8834fd20d5912899f8a06732c03b84973533"} Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.175271 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-2ph78" event={"ID":"302ddff9-9052-4d4f-9b4b-b0c0b1b3a638","Type":"ContainerStarted","Data":"612114cb13ff6328f3442adf79a691955d77c3143995d52e22ccc9834f3f3ce9"} Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.175348 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-dvs8h" podStartSLOduration=130.175315491 podStartE2EDuration="2m10.175315491s" podCreationTimestamp="2026-02-18 00:34:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:55.174742012 +0000 UTC m=+156.742755172" watchObservedRunningTime="2026-02-18 00:36:55.175315491 +0000 UTC 
m=+156.743328661" Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.175715 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-pxzzf" podStartSLOduration=129.175709664 podStartE2EDuration="2m9.175709664s" podCreationTimestamp="2026-02-18 00:34:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:54.666754986 +0000 UTC m=+156.234768156" watchObservedRunningTime="2026-02-18 00:36:55.175709664 +0000 UTC m=+156.743722834" Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.176969 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7pmtt" event={"ID":"a0084382-ff2c-4c0d-8460-b0694790a78b","Type":"ContainerStarted","Data":"94584beaf7e196530b1bf6977c4ef761f95f66da924dc0f945793e8437b11f78"} Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.178420 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9n2vw" event={"ID":"5ec919db-352a-41eb-9564-dfc8add29680","Type":"ContainerStarted","Data":"b9f99c2a00ee9d107a8a5a4caf447669659534b90a422968b94b9fdd70805920"} Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.179961 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-pxzzf" event={"ID":"6f0f9444-6d63-4e2b-b63e-eb14589c5fdb","Type":"ContainerStarted","Data":"b0af44679d37401102af79b5688049fd055c51e4cd4b8d54b3907125b3c4e1cb"} Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.181259 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29522910-zlz2k" event={"ID":"eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02","Type":"ContainerStarted","Data":"4205f4b881d0c587d0aaf43aa7185653b959a5c4f9c11d22f324efb0c8116a2e"} Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.182749 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-plj59" event={"ID":"d06d8fca-e20d-4b6b-9466-8c612ec8cc5f","Type":"ContainerStarted","Data":"c70f4e307190f827761f4cbe731248232328184bdd05f5bd8303bec180f114e3"} Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.183385 4791 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-plj59 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/healthz\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body= Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.183418 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-plj59" podUID="d06d8fca-e20d-4b6b-9466-8c612ec8cc5f" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.31:8080/healthz\": dial tcp 10.217.0.31:8080: connect: connection refused" Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.187766 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" event={"ID":"8eb994e6-6e8b-4d0f-84f2-92333f1571c1","Type":"ContainerStarted","Data":"c837459652931f4b13af6a7c0135f62e46687e6b156717463c23e3735f360e0c"} Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.190778 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-multus/multus-admission-controller-857f4d67dd-t5878" event={"ID":"523f9b10-74b8-45bb-b400-054b069286dc","Type":"ContainerStarted","Data":"81705c070e33cbb708be81e58f21bfb6f330fc30b8d0993d5be7dc5b0dfe6b36"} Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.192016 4791 patch_prober.go:28] interesting pod/downloads-7954f5f757-qj494 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.192056 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-qj494" podUID="cf6cb538-0c36-4140-a49f-2fcf19d46169" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.198376 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.203673 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dnxrh" Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.206440 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:55 crc kubenswrapper[4791]: E0218 00:36:55.206751 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:55.706739462 +0000 UTC m=+157.274752632 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.221491 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-t5rkn" podStartSLOduration=129.221470629 podStartE2EDuration="2m9.221470629s" podCreationTimestamp="2026-02-18 00:34:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:55.220199718 +0000 UTC m=+156.788212888" watchObservedRunningTime="2026-02-18 00:36:55.221470629 +0000 UTC m=+156.789483799" Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.304445 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hb7bm" podStartSLOduration=129.304432078 podStartE2EDuration="2m9.304432078s" podCreationTimestamp="2026-02-18 00:34:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:55.262595112 +0000 UTC m=+156.830608282" watchObservedRunningTime="2026-02-18 00:36:55.304432078 +0000 UTC m=+156.872445248" Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.306440 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-vlrzr" podStartSLOduration=7.306433754 podStartE2EDuration="7.306433754s" podCreationTimestamp="2026-02-18 00:36:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:55.302488743 +0000 UTC m=+156.870501913" watchObservedRunningTime="2026-02-18 00:36:55.306433754 +0000 UTC m=+156.874446924" Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.307093 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:55 crc kubenswrapper[4791]: E0218 00:36:55.307243 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:55.80722823 +0000 UTC m=+157.375241400 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.308085 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:55 crc kubenswrapper[4791]: E0218 00:36:55.316769 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:55.816753595 +0000 UTC m=+157.384766755 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.324565 4791 patch_prober.go:28] interesting pod/router-default-5444994796-tk98h container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 18 00:36:55 crc kubenswrapper[4791]: [-]has-synced failed: reason withheld Feb 18 00:36:55 crc kubenswrapper[4791]: [+]process-running ok Feb 18 00:36:55 crc kubenswrapper[4791]: healthz check failed Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.324618 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tk98h" podUID="5826b603-6616-49b5-a4d0-9dd63e715c9e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.344136 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2m72p" Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.375502 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fpmgd" podStartSLOduration=129.37548318 podStartE2EDuration="2m9.37548318s" podCreationTimestamp="2026-02-18 00:34:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:55.374460027 +0000 UTC m=+156.942473197" watchObservedRunningTime="2026-02-18 00:36:55.37548318 +0000 UTC m=+156.943496350" Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.410403 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:55 crc kubenswrapper[4791]: E0218 00:36:55.410562 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:55.910539332 +0000 UTC m=+157.478552502 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.410623 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:55 crc kubenswrapper[4791]: E0218 00:36:55.410880 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:55.910869473 +0000 UTC m=+157.478882643 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.435341 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-px7q8" Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.435642 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-px7q8" Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.461941 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2tcll" podStartSLOduration=129.461926904 podStartE2EDuration="2m9.461926904s" podCreationTimestamp="2026-02-18 00:34:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:55.461675686 +0000 UTC m=+157.029688856" watchObservedRunningTime="2026-02-18 00:36:55.461926904 +0000 UTC m=+157.029940074" Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.515807 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:55 crc kubenswrapper[4791]: E0218 00:36:55.516126 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:56.016106989 +0000 UTC m=+157.584120149 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.575672 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-2mrz5" podStartSLOduration=130.575656341 podStartE2EDuration="2m10.575656341s" podCreationTimestamp="2026-02-18 00:34:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:55.517609109 +0000 UTC m=+157.085622279" watchObservedRunningTime="2026-02-18 00:36:55.575656341 +0000 UTC m=+157.143669511" Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.576709 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" podStartSLOduration=130.576702335 podStartE2EDuration="2m10.576702335s" podCreationTimestamp="2026-02-18 00:34:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:55.573651995 +0000 UTC m=+157.141665185" watchObservedRunningTime="2026-02-18 00:36:55.576702335 +0000 UTC m=+157.144715505" Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.598288 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.598555 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.602637 4791 patch_prober.go:28] interesting pod/apiserver-76f77b778f-xdf9g container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="Get \"https://10.217.0.5:8443/livez\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body= Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.602686 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" podUID="8eb994e6-6e8b-4d0f-84f2-92333f1571c1" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.5:8443/livez\": dial tcp 10.217.0.5:8443: connect: connection refused" Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.616940 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:55 crc kubenswrapper[4791]: E0218 00:36:55.617263 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:56.117251949 +0000 UTC m=+157.685265119 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.634107 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-px7q8" Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.640127 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-2ph78" podStartSLOduration=130.640111796 podStartE2EDuration="2m10.640111796s" podCreationTimestamp="2026-02-18 00:34:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:55.623096553 +0000 UTC m=+157.191109723" watchObservedRunningTime="2026-02-18 00:36:55.640111796 +0000 UTC m=+157.208124966" Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.687622 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9n2vw" podStartSLOduration=129.68760977 podStartE2EDuration="2m9.68760977s" podCreationTimestamp="2026-02-18 00:34:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:55.686511833 +0000 UTC m=+157.254525003" watchObservedRunningTime="2026-02-18 00:36:55.68760977 +0000 UTC m=+157.255622940" Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.720650 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:55 crc kubenswrapper[4791]: E0218 00:36:55.720961 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:56.220945734 +0000 UTC m=+157.788958904 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.746835 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-t5878" podStartSLOduration=129.746819531 podStartE2EDuration="2m9.746819531s" podCreationTimestamp="2026-02-18 00:34:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:55.737406809 +0000 UTC m=+157.305419979" watchObservedRunningTime="2026-02-18 00:36:55.746819531 +0000 UTC m=+157.314832701" Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.824703 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:55 crc kubenswrapper[4791]: E0218 00:36:55.825243 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:56.325230788 +0000 UTC m=+157.893243958 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.903334 4791 csr.go:261] certificate signing request csr-5dfpg is approved, waiting to be issued Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.931328 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:55 crc kubenswrapper[4791]: E0218 00:36:55.931522 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:56.431499878 +0000 UTC m=+157.999513048 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.931909 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:55 crc kubenswrapper[4791]: E0218 00:36:55.932286 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:56.432272133 +0000 UTC m=+158.000285303 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:55 crc kubenswrapper[4791]: I0218 00:36:55.943404 4791 csr.go:257] certificate signing request csr-5dfpg is issued Feb 18 00:36:56 crc kubenswrapper[4791]: I0218 00:36:56.034730 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:56 crc kubenswrapper[4791]: E0218 00:36:56.035219 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:56.535204463 +0000 UTC m=+158.103217633 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:56 crc kubenswrapper[4791]: I0218 00:36:56.116020 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-82mpq" Feb 18 00:36:56 crc kubenswrapper[4791]: I0218 00:36:56.136625 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:56 crc kubenswrapper[4791]: E0218 00:36:56.136928 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:56.636917312 +0000 UTC m=+158.204930482 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:56 crc kubenswrapper[4791]: I0218 00:36:56.201070 4791 patch_prober.go:28] interesting pod/downloads-7954f5f757-qj494 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Feb 18 00:36:56 crc kubenswrapper[4791]: I0218 00:36:56.201987 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-qj494" podUID="cf6cb538-0c36-4140-a49f-2fcf19d46169" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" Feb 18 00:36:56 crc kubenswrapper[4791]: I0218 00:36:56.201266 4791 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-plj59 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/healthz\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body= Feb 18 00:36:56 crc kubenswrapper[4791]: I0218 00:36:56.202343 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-plj59" podUID="d06d8fca-e20d-4b6b-9466-8c612ec8cc5f" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.31:8080/healthz\": dial tcp 10.217.0.31:8080: connect: connection refused" Feb 18 00:36:56 crc kubenswrapper[4791]: I0218 00:36:56.213029 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-px7q8" Feb 18 00:36:56 crc kubenswrapper[4791]: I0218 00:36:56.237597 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:56 crc kubenswrapper[4791]: E0218 00:36:56.237771 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:56.737743311 +0000 UTC m=+158.305756481 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:56 crc kubenswrapper[4791]: I0218 00:36:56.237833 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:56 crc kubenswrapper[4791]: E0218 00:36:56.238092 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:56.738080712 +0000 UTC m=+158.306093883 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:56 crc kubenswrapper[4791]: I0218 00:36:56.316018 4791 patch_prober.go:28] interesting pod/router-default-5444994796-tk98h container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 18 00:36:56 crc kubenswrapper[4791]: [-]has-synced failed: reason withheld Feb 18 00:36:56 crc kubenswrapper[4791]: [+]process-running ok Feb 18 00:36:56 crc kubenswrapper[4791]: healthz check failed Feb 18 00:36:56 crc kubenswrapper[4791]: I0218 00:36:56.316081 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tk98h" podUID="5826b603-6616-49b5-a4d0-9dd63e715c9e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 18 00:36:56 crc kubenswrapper[4791]: I0218 00:36:56.339226 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:56 crc kubenswrapper[4791]: E0218 00:36:56.339495 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:56.839468081 +0000 UTC m=+158.407481251 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:56 crc kubenswrapper[4791]: I0218 00:36:56.340635 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:56 crc kubenswrapper[4791]: E0218 00:36:56.343639 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:56.843624829 +0000 UTC m=+158.411637999 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:56 crc kubenswrapper[4791]: I0218 00:36:56.346290 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-t5rkn" Feb 18 00:36:56 crc kubenswrapper[4791]: I0218 00:36:56.445482 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:56 crc kubenswrapper[4791]: E0218 00:36:56.445776 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:56.945760982 +0000 UTC m=+158.513774152 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:56 crc kubenswrapper[4791]: I0218 00:36:56.549865 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:56 crc kubenswrapper[4791]: E0218 00:36:56.550185 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:57.05017409 +0000 UTC m=+158.618187260 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:56 crc kubenswrapper[4791]: I0218 00:36:56.675305 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:56 crc kubenswrapper[4791]: E0218 00:36:56.683221 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:57.183150244 +0000 UTC m=+158.751163414 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:56 crc kubenswrapper[4791]: I0218 00:36:56.776777 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:56 crc kubenswrapper[4791]: E0218 00:36:56.777085 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:57.277069016 +0000 UTC m=+158.845082196 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:56 crc kubenswrapper[4791]: I0218 00:36:56.799936 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 00:36:56 crc kubenswrapper[4791]: I0218 00:36:56.800218 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 00:36:56 crc kubenswrapper[4791]: I0218 00:36:56.878391 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:56 crc kubenswrapper[4791]: E0218 00:36:56.878566 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:57.378539896 +0000 UTC m=+158.946553066 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:56 crc kubenswrapper[4791]: I0218 00:36:56.878855 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:56 crc kubenswrapper[4791]: E0218 00:36:56.879194 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:57.379186028 +0000 UTC m=+158.947199198 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:56 crc kubenswrapper[4791]: I0218 00:36:56.944679 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2027-02-18 00:31:55 +0000 UTC, rotation deadline is 2026-11-12 04:28:30.948931354 +0000 UTC Feb 18 00:36:56 crc kubenswrapper[4791]: I0218 00:36:56.944718 4791 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 6411h51m34.004216625s for next certificate rotation Feb 18 00:36:56 crc kubenswrapper[4791]: I0218 00:36:56.980359 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:56 crc kubenswrapper[4791]: E0218 00:36:56.980728 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:57.480712911 +0000 UTC m=+159.048726081 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.082233 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:57 crc kubenswrapper[4791]: E0218 00:36:57.082690 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:57.582674009 +0000 UTC m=+159.150687179 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.184115 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:57 crc kubenswrapper[4791]: E0218 00:36:57.184264 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:57.684239712 +0000 UTC m=+159.252252872 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.185599 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:57 crc kubenswrapper[4791]: E0218 00:36:57.185949 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:57.685937519 +0000 UTC m=+159.253950689 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.204313 4791 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-dvs8h container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.34:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.205324 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-dvs8h" podUID="929ec2d3-2a0e-4031-9a2a-08b9744e2b40" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.34:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.217541 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-l4ktw" event={"ID":"ceddb914-6af3-4417-861b-20ac241c337d","Type":"ContainerStarted","Data":"5e7e96e60b2dab71ffa199048213ffb5ce696a3b64cd19de7184f0b8ca38a609"} Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.217585 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-l4ktw" event={"ID":"ceddb914-6af3-4417-861b-20ac241c337d","Type":"ContainerStarted","Data":"dd2828a9c6034948bd584dc10b182b5b5bfedf17c787e0cb7ce9f09fbfab1ce1"} Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.286937 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:57 crc kubenswrapper[4791]: E0218 00:36:57.287183 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:57.78709521 +0000 UTC m=+159.355108380 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.287516 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:57 crc kubenswrapper[4791]: E0218 00:36:57.287878 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:57.787866054 +0000 UTC m=+159.355879224 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.316992 4791 patch_prober.go:28] interesting pod/router-default-5444994796-tk98h container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 18 00:36:57 crc kubenswrapper[4791]: [-]has-synced failed: reason withheld Feb 18 00:36:57 crc kubenswrapper[4791]: [+]process-running ok Feb 18 00:36:57 crc kubenswrapper[4791]: healthz check failed Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.317384 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tk98h" podUID="5826b603-6616-49b5-a4d0-9dd63e715c9e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.347955 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-dvs8h" Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.352070 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.388605 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:57 crc kubenswrapper[4791]: E0218 00:36:57.390378 4791 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:57.89036233 +0000 UTC m=+159.458375500 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.491321 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:57 crc kubenswrapper[4791]: E0218 00:36:57.491816 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:57.991804519 +0000 UTC m=+159.559817689 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.593593 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:57 crc kubenswrapper[4791]: E0218 00:36:57.593852 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:58.09383848 +0000 UTC m=+159.661851650 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.613950 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-cp28s"] Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.615131 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-cp28s" Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.619496 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.636341 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cp28s"] Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.695571 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.695617 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/719d62e6-1ac9-497f-b889-d2ee84c621d1-catalog-content\") pod \"certified-operators-cp28s\" (UID: \"719d62e6-1ac9-497f-b889-d2ee84c621d1\") " pod="openshift-marketplace/certified-operators-cp28s" Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.695673 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b4zch\" (UniqueName: \"kubernetes.io/projected/719d62e6-1ac9-497f-b889-d2ee84c621d1-kube-api-access-b4zch\") pod \"certified-operators-cp28s\" (UID: \"719d62e6-1ac9-497f-b889-d2ee84c621d1\") " pod="openshift-marketplace/certified-operators-cp28s" Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.695703 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/719d62e6-1ac9-497f-b889-d2ee84c621d1-utilities\") pod \"certified-operators-cp28s\" (UID: \"719d62e6-1ac9-497f-b889-d2ee84c621d1\") " pod="openshift-marketplace/certified-operators-cp28s" Feb 18 00:36:57 crc kubenswrapper[4791]: E0218 00:36:57.695851 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:58.195834868 +0000 UTC m=+159.763848028 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.743131 4791 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.796569 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:57 crc kubenswrapper[4791]: E0218 00:36:57.796732 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:58.29670772 +0000 UTC m=+159.864720890 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.796804 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/719d62e6-1ac9-497f-b889-d2ee84c621d1-utilities\") pod \"certified-operators-cp28s\" (UID: \"719d62e6-1ac9-497f-b889-d2ee84c621d1\") " pod="openshift-marketplace/certified-operators-cp28s" Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.796948 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.796991 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/719d62e6-1ac9-497f-b889-d2ee84c621d1-catalog-content\") pod \"certified-operators-cp28s\" (UID: \"719d62e6-1ac9-497f-b889-d2ee84c621d1\") " pod="openshift-marketplace/certified-operators-cp28s" Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.797049 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b4zch\" (UniqueName: \"kubernetes.io/projected/719d62e6-1ac9-497f-b889-d2ee84c621d1-kube-api-access-b4zch\") pod \"certified-operators-cp28s\" (UID: \"719d62e6-1ac9-497f-b889-d2ee84c621d1\") " 
pod="openshift-marketplace/certified-operators-cp28s" Feb 18 00:36:57 crc kubenswrapper[4791]: E0218 00:36:57.797458 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:58.297441133 +0000 UTC m=+159.865454293 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.806469 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-mklvx"] Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.807376 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mklvx" Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.809411 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.821240 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mklvx"] Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.880963 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/719d62e6-1ac9-497f-b889-d2ee84c621d1-utilities\") pod \"certified-operators-cp28s\" (UID: \"719d62e6-1ac9-497f-b889-d2ee84c621d1\") " pod="openshift-marketplace/certified-operators-cp28s" Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.881047 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/719d62e6-1ac9-497f-b889-d2ee84c621d1-catalog-content\") pod \"certified-operators-cp28s\" (UID: \"719d62e6-1ac9-497f-b889-d2ee84c621d1\") " pod="openshift-marketplace/certified-operators-cp28s" Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.887266 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b4zch\" (UniqueName: \"kubernetes.io/projected/719d62e6-1ac9-497f-b889-d2ee84c621d1-kube-api-access-b4zch\") pod \"certified-operators-cp28s\" (UID: \"719d62e6-1ac9-497f-b889-d2ee84c621d1\") " pod="openshift-marketplace/certified-operators-cp28s" Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.897656 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:57 crc kubenswrapper[4791]: E0218 00:36:57.897818 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-02-18 00:36:58.397791307 +0000 UTC m=+159.965804477 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.898290 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fbmr8\" (UniqueName: \"kubernetes.io/projected/14300052-284f-4b0c-8238-13ea3a9ddb6a-kube-api-access-fbmr8\") pod \"community-operators-mklvx\" (UID: \"14300052-284f-4b0c-8238-13ea3a9ddb6a\") " pod="openshift-marketplace/community-operators-mklvx" Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.898529 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.898650 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14300052-284f-4b0c-8238-13ea3a9ddb6a-utilities\") pod \"community-operators-mklvx\" (UID: \"14300052-284f-4b0c-8238-13ea3a9ddb6a\") " pod="openshift-marketplace/community-operators-mklvx" Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.898770 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14300052-284f-4b0c-8238-13ea3a9ddb6a-catalog-content\") pod \"community-operators-mklvx\" (UID: \"14300052-284f-4b0c-8238-13ea3a9ddb6a\") " pod="openshift-marketplace/community-operators-mklvx" Feb 18 00:36:57 crc kubenswrapper[4791]: E0218 00:36:57.898904 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:58.398893044 +0000 UTC m=+159.966906214 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.927966 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-cp28s" Feb 18 00:36:57 crc kubenswrapper[4791]: I0218 00:36:57.999921 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.000173 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14300052-284f-4b0c-8238-13ea3a9ddb6a-utilities\") pod \"community-operators-mklvx\" (UID: \"14300052-284f-4b0c-8238-13ea3a9ddb6a\") " pod="openshift-marketplace/community-operators-mklvx" Feb 18 00:36:58 crc kubenswrapper[4791]: E0218 00:36:58.000233 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:58.50019826 +0000 UTC m=+160.068211470 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.000293 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14300052-284f-4b0c-8238-13ea3a9ddb6a-catalog-content\") pod \"community-operators-mklvx\" (UID: \"14300052-284f-4b0c-8238-13ea3a9ddb6a\") " pod="openshift-marketplace/community-operators-mklvx" Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.000443 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fbmr8\" (UniqueName: \"kubernetes.io/projected/14300052-284f-4b0c-8238-13ea3a9ddb6a-kube-api-access-fbmr8\") pod \"community-operators-mklvx\" (UID: \"14300052-284f-4b0c-8238-13ea3a9ddb6a\") " pod="openshift-marketplace/community-operators-mklvx" Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.000726 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14300052-284f-4b0c-8238-13ea3a9ddb6a-utilities\") pod \"community-operators-mklvx\" (UID: \"14300052-284f-4b0c-8238-13ea3a9ddb6a\") " pod="openshift-marketplace/community-operators-mklvx" Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.000780 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14300052-284f-4b0c-8238-13ea3a9ddb6a-catalog-content\") pod \"community-operators-mklvx\" (UID: \"14300052-284f-4b0c-8238-13ea3a9ddb6a\") " pod="openshift-marketplace/community-operators-mklvx" Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.007008 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-9svms"] Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.008397 4791 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openshift-marketplace/certified-operators-9svms" Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.020325 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9svms"] Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.024800 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fbmr8\" (UniqueName: \"kubernetes.io/projected/14300052-284f-4b0c-8238-13ea3a9ddb6a-kube-api-access-fbmr8\") pod \"community-operators-mklvx\" (UID: \"14300052-284f-4b0c-8238-13ea3a9ddb6a\") " pod="openshift-marketplace/community-operators-mklvx" Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.101319 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.101361 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/34fbd2ce-81ba-4a98-ba90-982996dec809-catalog-content\") pod \"certified-operators-9svms\" (UID: \"34fbd2ce-81ba-4a98-ba90-982996dec809\") " pod="openshift-marketplace/certified-operators-9svms" Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.101447 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/34fbd2ce-81ba-4a98-ba90-982996dec809-utilities\") pod \"certified-operators-9svms\" (UID: \"34fbd2ce-81ba-4a98-ba90-982996dec809\") " pod="openshift-marketplace/certified-operators-9svms" Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.101614 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hdf7f\" (UniqueName: \"kubernetes.io/projected/34fbd2ce-81ba-4a98-ba90-982996dec809-kube-api-access-hdf7f\") pod \"certified-operators-9svms\" (UID: \"34fbd2ce-81ba-4a98-ba90-982996dec809\") " pod="openshift-marketplace/certified-operators-9svms" Feb 18 00:36:58 crc kubenswrapper[4791]: E0218 00:36:58.101795 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:58.601775474 +0000 UTC m=+160.169788654 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.119880 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mklvx" Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.202473 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:58 crc kubenswrapper[4791]: E0218 00:36:58.202708 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-18 00:36:58.702687166 +0000 UTC m=+160.270700336 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.202922 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/34fbd2ce-81ba-4a98-ba90-982996dec809-catalog-content\") pod \"certified-operators-9svms\" (UID: \"34fbd2ce-81ba-4a98-ba90-982996dec809\") " pod="openshift-marketplace/certified-operators-9svms" Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.203003 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/34fbd2ce-81ba-4a98-ba90-982996dec809-utilities\") pod \"certified-operators-9svms\" (UID: \"34fbd2ce-81ba-4a98-ba90-982996dec809\") " pod="openshift-marketplace/certified-operators-9svms" Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.203059 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cp28s"] Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.203068 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hdf7f\" (UniqueName: \"kubernetes.io/projected/34fbd2ce-81ba-4a98-ba90-982996dec809-kube-api-access-hdf7f\") pod \"certified-operators-9svms\" (UID: \"34fbd2ce-81ba-4a98-ba90-982996dec809\") " pod="openshift-marketplace/certified-operators-9svms" Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.203203 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:58 crc kubenswrapper[4791]: E0218 00:36:58.203491 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-18 00:36:58.703477572 +0000 UTC m=+160.271490742 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-r7vng" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.203743 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/34fbd2ce-81ba-4a98-ba90-982996dec809-utilities\") pod \"certified-operators-9svms\" (UID: \"34fbd2ce-81ba-4a98-ba90-982996dec809\") " pod="openshift-marketplace/certified-operators-9svms" Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.203857 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/34fbd2ce-81ba-4a98-ba90-982996dec809-catalog-content\") pod \"certified-operators-9svms\" (UID: \"34fbd2ce-81ba-4a98-ba90-982996dec809\") " pod="openshift-marketplace/certified-operators-9svms" Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.217685 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-2gl6m"] Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.225313 4791 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2026-02-18T00:36:57.743170836Z","Handler":null,"Name":""} Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.227055 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-2gl6m" Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.228230 4791 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.228256 4791 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.231220 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hdf7f\" (UniqueName: \"kubernetes.io/projected/34fbd2ce-81ba-4a98-ba90-982996dec809-kube-api-access-hdf7f\") pod \"certified-operators-9svms\" (UID: \"34fbd2ce-81ba-4a98-ba90-982996dec809\") " pod="openshift-marketplace/certified-operators-9svms" Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.235522 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2gl6m"] Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.250370 4791 generic.go:334] "Generic (PLEG): container finished" podID="eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02" containerID="4205f4b881d0c587d0aaf43aa7185653b959a5c4f9c11d22f324efb0c8116a2e" exitCode=0 Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.250432 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29522910-zlz2k" event={"ID":"eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02","Type":"ContainerDied","Data":"4205f4b881d0c587d0aaf43aa7185653b959a5c4f9c11d22f324efb0c8116a2e"} Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.254920 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-l4ktw" event={"ID":"ceddb914-6af3-4417-861b-20ac241c337d","Type":"ContainerStarted","Data":"aa182ab383247e90858dfac841e52ddf0e5763c3b67c02b60f930daf1c4ef1c1"} Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.297076 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-l4ktw" podStartSLOduration=10.297057752 podStartE2EDuration="10.297057752s" podCreationTimestamp="2026-02-18 00:36:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:58.296345839 +0000 UTC m=+159.864359009" watchObservedRunningTime="2026-02-18 00:36:58.297057752 +0000 UTC m=+159.865070922" Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.308553 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.308874 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12887847-f4a6-4fed-b6b5-e30a9c494948-catalog-content\") pod \"community-operators-2gl6m\" (UID: \"12887847-f4a6-4fed-b6b5-e30a9c494948\") " pod="openshift-marketplace/community-operators-2gl6m" Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.309027 4791 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12887847-f4a6-4fed-b6b5-e30a9c494948-utilities\") pod \"community-operators-2gl6m\" (UID: \"12887847-f4a6-4fed-b6b5-e30a9c494948\") " pod="openshift-marketplace/community-operators-2gl6m" Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.309078 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2fvnj\" (UniqueName: \"kubernetes.io/projected/12887847-f4a6-4fed-b6b5-e30a9c494948-kube-api-access-2fvnj\") pod \"community-operators-2gl6m\" (UID: \"12887847-f4a6-4fed-b6b5-e30a9c494948\") " pod="openshift-marketplace/community-operators-2gl6m" Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.313978 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.315253 4791 patch_prober.go:28] interesting pod/router-default-5444994796-tk98h container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 18 00:36:58 crc kubenswrapper[4791]: [-]has-synced failed: reason withheld Feb 18 00:36:58 crc kubenswrapper[4791]: [+]process-running ok Feb 18 00:36:58 crc kubenswrapper[4791]: healthz check failed Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.315310 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tk98h" podUID="5826b603-6616-49b5-a4d0-9dd63e715c9e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.346501 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-9svms" Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.351837 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mklvx"] Feb 18 00:36:58 crc kubenswrapper[4791]: W0218 00:36:58.362318 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod14300052_284f_4b0c_8238_13ea3a9ddb6a.slice/crio-1ab783d14212ef3aba4e76a514e4a75877f863004f54e9bd8c493cc46b4217c9 WatchSource:0}: Error finding container 1ab783d14212ef3aba4e76a514e4a75877f863004f54e9bd8c493cc46b4217c9: Status 404 returned error can't find the container with id 1ab783d14212ef3aba4e76a514e4a75877f863004f54e9bd8c493cc46b4217c9 Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.410677 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12887847-f4a6-4fed-b6b5-e30a9c494948-utilities\") pod \"community-operators-2gl6m\" (UID: \"12887847-f4a6-4fed-b6b5-e30a9c494948\") " pod="openshift-marketplace/community-operators-2gl6m" Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.410712 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2fvnj\" (UniqueName: \"kubernetes.io/projected/12887847-f4a6-4fed-b6b5-e30a9c494948-kube-api-access-2fvnj\") pod \"community-operators-2gl6m\" (UID: \"12887847-f4a6-4fed-b6b5-e30a9c494948\") " pod="openshift-marketplace/community-operators-2gl6m" Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.410760 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.410817 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12887847-f4a6-4fed-b6b5-e30a9c494948-catalog-content\") pod \"community-operators-2gl6m\" (UID: \"12887847-f4a6-4fed-b6b5-e30a9c494948\") " pod="openshift-marketplace/community-operators-2gl6m" Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.411534 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12887847-f4a6-4fed-b6b5-e30a9c494948-utilities\") pod \"community-operators-2gl6m\" (UID: \"12887847-f4a6-4fed-b6b5-e30a9c494948\") " pod="openshift-marketplace/community-operators-2gl6m" Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.411564 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12887847-f4a6-4fed-b6b5-e30a9c494948-catalog-content\") pod \"community-operators-2gl6m\" (UID: \"12887847-f4a6-4fed-b6b5-e30a9c494948\") " pod="openshift-marketplace/community-operators-2gl6m" Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.413696 4791 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.413739 4791 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.427969 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2fvnj\" (UniqueName: \"kubernetes.io/projected/12887847-f4a6-4fed-b6b5-e30a9c494948-kube-api-access-2fvnj\") pod \"community-operators-2gl6m\" (UID: \"12887847-f4a6-4fed-b6b5-e30a9c494948\") " pod="openshift-marketplace/community-operators-2gl6m" Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.439775 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-r7vng\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.531694 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.535120 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9svms"] Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.550333 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-2gl6m" Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.761145 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-r7vng"] Feb 18 00:36:58 crc kubenswrapper[4791]: I0218 00:36:58.792076 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2gl6m"] Feb 18 00:36:58 crc kubenswrapper[4791]: W0218 00:36:58.802340 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod12887847_f4a6_4fed_b6b5_e30a9c494948.slice/crio-fc4fdb72a058c92a131a1aef7a68aaacc6033a912e103a1d2a5305df88b4485e WatchSource:0}: Error finding container fc4fdb72a058c92a131a1aef7a68aaacc6033a912e103a1d2a5305df88b4485e: Status 404 returned error can't find the container with id fc4fdb72a058c92a131a1aef7a68aaacc6033a912e103a1d2a5305df88b4485e Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.067360 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.260637 4791 generic.go:334] "Generic (PLEG): container finished" podID="12887847-f4a6-4fed-b6b5-e30a9c494948" containerID="240808ad8b2073bee68a6b194b2c0e5ef7cd12030fddfc7400519c0d9ebec227" exitCode=0 Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.260707 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2gl6m" event={"ID":"12887847-f4a6-4fed-b6b5-e30a9c494948","Type":"ContainerDied","Data":"240808ad8b2073bee68a6b194b2c0e5ef7cd12030fddfc7400519c0d9ebec227"} Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.260737 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2gl6m" event={"ID":"12887847-f4a6-4fed-b6b5-e30a9c494948","Type":"ContainerStarted","Data":"fc4fdb72a058c92a131a1aef7a68aaacc6033a912e103a1d2a5305df88b4485e"} Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.262283 4791 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.263463 4791 generic.go:334] "Generic (PLEG): container finished" podID="14300052-284f-4b0c-8238-13ea3a9ddb6a" containerID="6d93079bb0f145bd6a361c270c783b4d2f098f09086b78639ec026f056ae13a6" exitCode=0 Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.263558 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mklvx" event={"ID":"14300052-284f-4b0c-8238-13ea3a9ddb6a","Type":"ContainerDied","Data":"6d93079bb0f145bd6a361c270c783b4d2f098f09086b78639ec026f056ae13a6"} Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.263598 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mklvx" event={"ID":"14300052-284f-4b0c-8238-13ea3a9ddb6a","Type":"ContainerStarted","Data":"1ab783d14212ef3aba4e76a514e4a75877f863004f54e9bd8c493cc46b4217c9"} Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.265565 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" event={"ID":"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7","Type":"ContainerStarted","Data":"af0bd9293dd92cc94bae0f4f216ab7d397ce48f46b0dabbec0cb5cf4a603d709"} Feb 18 00:36:59 crc 
kubenswrapper[4791]: I0218 00:36:59.265600 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" event={"ID":"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7","Type":"ContainerStarted","Data":"9dc98f91327b9eb40a048c381fac3756355e309ce37795ba656c1676c6fdd520"} Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.265909 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.268450 4791 generic.go:334] "Generic (PLEG): container finished" podID="34fbd2ce-81ba-4a98-ba90-982996dec809" containerID="b88a036cb4494abc0c0913d0c046ee921e075d7b6148bafb48db84af26489b37" exitCode=0 Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.268516 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9svms" event={"ID":"34fbd2ce-81ba-4a98-ba90-982996dec809","Type":"ContainerDied","Data":"b88a036cb4494abc0c0913d0c046ee921e075d7b6148bafb48db84af26489b37"} Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.268535 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9svms" event={"ID":"34fbd2ce-81ba-4a98-ba90-982996dec809","Type":"ContainerStarted","Data":"30a37bea908c9d8cd2a06f4813f0318b847ca5b724233b4f7a64ef3970dde163"} Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.287349 4791 generic.go:334] "Generic (PLEG): container finished" podID="719d62e6-1ac9-497f-b889-d2ee84c621d1" containerID="cd71c50873f3c32a884edff9ff74afd2060de9a95e5dbf3f6a615bbf48e90c45" exitCode=0 Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.287616 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cp28s" event={"ID":"719d62e6-1ac9-497f-b889-d2ee84c621d1","Type":"ContainerDied","Data":"cd71c50873f3c32a884edff9ff74afd2060de9a95e5dbf3f6a615bbf48e90c45"} Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.287672 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cp28s" event={"ID":"719d62e6-1ac9-497f-b889-d2ee84c621d1","Type":"ContainerStarted","Data":"3d54bc38584a692c514346dc9d2939839c2f85fca99ecaec01e12aafaf11098a"} Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.329935 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" podStartSLOduration=134.329920324 podStartE2EDuration="2m14.329920324s" podCreationTimestamp="2026-02-18 00:34:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:36:59.327416001 +0000 UTC m=+160.895429181" watchObservedRunningTime="2026-02-18 00:36:59.329920324 +0000 UTC m=+160.897933494" Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.334429 4791 patch_prober.go:28] interesting pod/router-default-5444994796-tk98h container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 18 00:36:59 crc kubenswrapper[4791]: [-]has-synced failed: reason withheld Feb 18 00:36:59 crc kubenswrapper[4791]: [+]process-running ok Feb 18 00:36:59 crc kubenswrapper[4791]: healthz check failed Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.334485 4791 prober.go:107] "Probe failed" probeType="Startup" 
pod="openshift-ingress/router-default-5444994796-tk98h" podUID="5826b603-6616-49b5-a4d0-9dd63e715c9e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.539468 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29522910-zlz2k" Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.631180 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lcclb\" (UniqueName: \"kubernetes.io/projected/eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02-kube-api-access-lcclb\") pod \"eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02\" (UID: \"eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02\") " Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.631394 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02-secret-volume\") pod \"eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02\" (UID: \"eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02\") " Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.631517 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02-config-volume\") pod \"eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02\" (UID: \"eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02\") " Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.632250 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02-config-volume" (OuterVolumeSpecName: "config-volume") pod "eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02" (UID: "eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.637621 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02-kube-api-access-lcclb" (OuterVolumeSpecName: "kube-api-access-lcclb") pod "eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02" (UID: "eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02"). InnerVolumeSpecName "kube-api-access-lcclb". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.637651 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02" (UID: "eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.732483 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lcclb\" (UniqueName: \"kubernetes.io/projected/eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02-kube-api-access-lcclb\") on node \"crc\" DevicePath \"\"" Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.732518 4791 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02-secret-volume\") on node \"crc\" DevicePath \"\"" Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.732527 4791 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02-config-volume\") on node \"crc\" DevicePath \"\"" Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.812473 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-l5zp8"] Feb 18 00:36:59 crc kubenswrapper[4791]: E0218 00:36:59.812748 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02" containerName="collect-profiles" Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.812771 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02" containerName="collect-profiles" Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.812895 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02" containerName="collect-profiles" Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.814056 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l5zp8" Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.815896 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.823873 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-l5zp8"] Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.934834 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5747934-5c30-4b86-bfd5-03a8ad37c9bd-catalog-content\") pod \"redhat-marketplace-l5zp8\" (UID: \"f5747934-5c30-4b86-bfd5-03a8ad37c9bd\") " pod="openshift-marketplace/redhat-marketplace-l5zp8" Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.934885 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5747934-5c30-4b86-bfd5-03a8ad37c9bd-utilities\") pod \"redhat-marketplace-l5zp8\" (UID: \"f5747934-5c30-4b86-bfd5-03a8ad37c9bd\") " pod="openshift-marketplace/redhat-marketplace-l5zp8" Feb 18 00:36:59 crc kubenswrapper[4791]: I0218 00:36:59.934990 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pm7wc\" (UniqueName: \"kubernetes.io/projected/f5747934-5c30-4b86-bfd5-03a8ad37c9bd-kube-api-access-pm7wc\") pod \"redhat-marketplace-l5zp8\" (UID: \"f5747934-5c30-4b86-bfd5-03a8ad37c9bd\") " pod="openshift-marketplace/redhat-marketplace-l5zp8" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.011306 4791 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.012112 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.013997 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.014268 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.016374 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.035668 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5747934-5c30-4b86-bfd5-03a8ad37c9bd-utilities\") pod \"redhat-marketplace-l5zp8\" (UID: \"f5747934-5c30-4b86-bfd5-03a8ad37c9bd\") " pod="openshift-marketplace/redhat-marketplace-l5zp8" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.035777 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pm7wc\" (UniqueName: \"kubernetes.io/projected/f5747934-5c30-4b86-bfd5-03a8ad37c9bd-kube-api-access-pm7wc\") pod \"redhat-marketplace-l5zp8\" (UID: \"f5747934-5c30-4b86-bfd5-03a8ad37c9bd\") " pod="openshift-marketplace/redhat-marketplace-l5zp8" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.035841 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5747934-5c30-4b86-bfd5-03a8ad37c9bd-catalog-content\") pod \"redhat-marketplace-l5zp8\" (UID: \"f5747934-5c30-4b86-bfd5-03a8ad37c9bd\") " pod="openshift-marketplace/redhat-marketplace-l5zp8" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.037269 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5747934-5c30-4b86-bfd5-03a8ad37c9bd-catalog-content\") pod \"redhat-marketplace-l5zp8\" (UID: \"f5747934-5c30-4b86-bfd5-03a8ad37c9bd\") " pod="openshift-marketplace/redhat-marketplace-l5zp8" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.037435 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5747934-5c30-4b86-bfd5-03a8ad37c9bd-utilities\") pod \"redhat-marketplace-l5zp8\" (UID: \"f5747934-5c30-4b86-bfd5-03a8ad37c9bd\") " pod="openshift-marketplace/redhat-marketplace-l5zp8" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.053045 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pm7wc\" (UniqueName: \"kubernetes.io/projected/f5747934-5c30-4b86-bfd5-03a8ad37c9bd-kube-api-access-pm7wc\") pod \"redhat-marketplace-l5zp8\" (UID: \"f5747934-5c30-4b86-bfd5-03a8ad37c9bd\") " pod="openshift-marketplace/redhat-marketplace-l5zp8" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.126371 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l5zp8" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.138052 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/487f84df-585a-4249-943e-60252e7118c4-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"487f84df-585a-4249-943e-60252e7118c4\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.138274 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/487f84df-585a-4249-943e-60252e7118c4-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"487f84df-585a-4249-943e-60252e7118c4\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.215774 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-lxgnb"] Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.219768 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lxgnb" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.231838 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lxgnb"] Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.240287 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/487f84df-585a-4249-943e-60252e7118c4-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"487f84df-585a-4249-943e-60252e7118c4\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.240329 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/487f84df-585a-4249-943e-60252e7118c4-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"487f84df-585a-4249-943e-60252e7118c4\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.240407 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/487f84df-585a-4249-943e-60252e7118c4-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"487f84df-585a-4249-943e-60252e7118c4\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.264228 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/487f84df-585a-4249-943e-60252e7118c4-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"487f84df-585a-4249-943e-60252e7118c4\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.320346 4791 patch_prober.go:28] interesting pod/router-default-5444994796-tk98h container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 18 00:37:00 crc kubenswrapper[4791]: [-]has-synced failed: reason withheld Feb 18 00:37:00 crc kubenswrapper[4791]: [+]process-running ok Feb 18 00:37:00 crc kubenswrapper[4791]: healthz check failed Feb 18 00:37:00 crc 
kubenswrapper[4791]: I0218 00:37:00.320792 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tk98h" podUID="5826b603-6616-49b5-a4d0-9dd63e715c9e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.322324 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29522910-zlz2k" event={"ID":"eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02","Type":"ContainerDied","Data":"1701713f5ff218a737090ca561a699ae2adeeacd4384a68fee2473c6045c565f"} Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.322383 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1701713f5ff218a737090ca561a699ae2adeeacd4384a68fee2473c6045c565f" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.322453 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29522910-zlz2k" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.333115 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.341731 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f5a9f53-3f5e-44d5-b322-fb0910613ad8-catalog-content\") pod \"redhat-marketplace-lxgnb\" (UID: \"6f5a9f53-3f5e-44d5-b322-fb0910613ad8\") " pod="openshift-marketplace/redhat-marketplace-lxgnb" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.341787 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6rxd5\" (UniqueName: \"kubernetes.io/projected/6f5a9f53-3f5e-44d5-b322-fb0910613ad8-kube-api-access-6rxd5\") pod \"redhat-marketplace-lxgnb\" (UID: \"6f5a9f53-3f5e-44d5-b322-fb0910613ad8\") " pod="openshift-marketplace/redhat-marketplace-lxgnb" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.341883 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f5a9f53-3f5e-44d5-b322-fb0910613ad8-utilities\") pod \"redhat-marketplace-lxgnb\" (UID: \"6f5a9f53-3f5e-44d5-b322-fb0910613ad8\") " pod="openshift-marketplace/redhat-marketplace-lxgnb" Feb 18 00:37:00 crc kubenswrapper[4791]: W0218 00:37:00.412466 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf5747934_5c30_4b86_bfd5_03a8ad37c9bd.slice/crio-a21bd7a9974d43391d23b817771c14e4f64240abee15da220981494ba671db44 WatchSource:0}: Error finding container a21bd7a9974d43391d23b817771c14e4f64240abee15da220981494ba671db44: Status 404 returned error can't find the container with id a21bd7a9974d43391d23b817771c14e4f64240abee15da220981494ba671db44 Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.427456 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-l5zp8"] Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.443050 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f5a9f53-3f5e-44d5-b322-fb0910613ad8-utilities\") pod \"redhat-marketplace-lxgnb\" (UID: \"6f5a9f53-3f5e-44d5-b322-fb0910613ad8\") 
" pod="openshift-marketplace/redhat-marketplace-lxgnb" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.443127 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f5a9f53-3f5e-44d5-b322-fb0910613ad8-catalog-content\") pod \"redhat-marketplace-lxgnb\" (UID: \"6f5a9f53-3f5e-44d5-b322-fb0910613ad8\") " pod="openshift-marketplace/redhat-marketplace-lxgnb" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.443167 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6rxd5\" (UniqueName: \"kubernetes.io/projected/6f5a9f53-3f5e-44d5-b322-fb0910613ad8-kube-api-access-6rxd5\") pod \"redhat-marketplace-lxgnb\" (UID: \"6f5a9f53-3f5e-44d5-b322-fb0910613ad8\") " pod="openshift-marketplace/redhat-marketplace-lxgnb" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.443579 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f5a9f53-3f5e-44d5-b322-fb0910613ad8-utilities\") pod \"redhat-marketplace-lxgnb\" (UID: \"6f5a9f53-3f5e-44d5-b322-fb0910613ad8\") " pod="openshift-marketplace/redhat-marketplace-lxgnb" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.445002 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f5a9f53-3f5e-44d5-b322-fb0910613ad8-catalog-content\") pod \"redhat-marketplace-lxgnb\" (UID: \"6f5a9f53-3f5e-44d5-b322-fb0910613ad8\") " pod="openshift-marketplace/redhat-marketplace-lxgnb" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.471010 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6rxd5\" (UniqueName: \"kubernetes.io/projected/6f5a9f53-3f5e-44d5-b322-fb0910613ad8-kube-api-access-6rxd5\") pod \"redhat-marketplace-lxgnb\" (UID: \"6f5a9f53-3f5e-44d5-b322-fb0910613ad8\") " pod="openshift-marketplace/redhat-marketplace-lxgnb" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.559311 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lxgnb" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.587798 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.605676 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.610359 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-xdf9g" Feb 18 00:37:00 crc kubenswrapper[4791]: W0218 00:37:00.616028 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod487f84df_585a_4249_943e_60252e7118c4.slice/crio-8a0fe7618c9917fdb949863d7a440f986db76cf1d43b2e3c0044779cc07dcbc8 WatchSource:0}: Error finding container 8a0fe7618c9917fdb949863d7a440f986db76cf1d43b2e3c0044779cc07dcbc8: Status 404 returned error can't find the container with id 8a0fe7618c9917fdb949863d7a440f986db76cf1d43b2e3c0044779cc07dcbc8 Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.805209 4791 patch_prober.go:28] interesting pod/downloads-7954f5f757-qj494 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.805266 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-qj494" podUID="cf6cb538-0c36-4140-a49f-2fcf19d46169" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.805413 4791 patch_prober.go:28] interesting pod/downloads-7954f5f757-qj494 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.805435 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-qj494" podUID="cf6cb538-0c36-4140-a49f-2fcf19d46169" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.817906 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-mqrgh"] Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.819451 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-mqrgh" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.823514 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.833029 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mqrgh"] Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.840820 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-rxhdk" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.841484 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-rxhdk" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.847015 4791 patch_prober.go:28] interesting pod/console-f9d7485db-rxhdk container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.17:8443/health\": dial tcp 10.217.0.17:8443: connect: connection refused" start-of-body= Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.847145 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-rxhdk" podUID="e1e9abd7-81f2-423d-8a79-c4102461680d" containerName="console" probeResult="failure" output="Get \"https://10.217.0.17:8443/health\": dial tcp 10.217.0.17:8443: connect: connection refused" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.953817 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p4krc\" (UniqueName: \"kubernetes.io/projected/edb21ff9-bfba-4ff7-a6df-54d2236a1233-kube-api-access-p4krc\") pod \"redhat-operators-mqrgh\" (UID: \"edb21ff9-bfba-4ff7-a6df-54d2236a1233\") " pod="openshift-marketplace/redhat-operators-mqrgh" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.955367 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/edb21ff9-bfba-4ff7-a6df-54d2236a1233-utilities\") pod \"redhat-operators-mqrgh\" (UID: \"edb21ff9-bfba-4ff7-a6df-54d2236a1233\") " pod="openshift-marketplace/redhat-operators-mqrgh" Feb 18 00:37:00 crc kubenswrapper[4791]: I0218 00:37:00.956150 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/edb21ff9-bfba-4ff7-a6df-54d2236a1233-catalog-content\") pod \"redhat-operators-mqrgh\" (UID: \"edb21ff9-bfba-4ff7-a6df-54d2236a1233\") " pod="openshift-marketplace/redhat-operators-mqrgh" Feb 18 00:37:01 crc kubenswrapper[4791]: I0218 00:37:01.058759 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/edb21ff9-bfba-4ff7-a6df-54d2236a1233-utilities\") pod \"redhat-operators-mqrgh\" (UID: \"edb21ff9-bfba-4ff7-a6df-54d2236a1233\") " pod="openshift-marketplace/redhat-operators-mqrgh" Feb 18 00:37:01 crc kubenswrapper[4791]: I0218 00:37:01.058861 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/edb21ff9-bfba-4ff7-a6df-54d2236a1233-catalog-content\") pod \"redhat-operators-mqrgh\" (UID: \"edb21ff9-bfba-4ff7-a6df-54d2236a1233\") " pod="openshift-marketplace/redhat-operators-mqrgh" Feb 18 00:37:01 crc kubenswrapper[4791]: I0218 00:37:01.058928 
4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p4krc\" (UniqueName: \"kubernetes.io/projected/edb21ff9-bfba-4ff7-a6df-54d2236a1233-kube-api-access-p4krc\") pod \"redhat-operators-mqrgh\" (UID: \"edb21ff9-bfba-4ff7-a6df-54d2236a1233\") " pod="openshift-marketplace/redhat-operators-mqrgh" Feb 18 00:37:01 crc kubenswrapper[4791]: I0218 00:37:01.059451 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/edb21ff9-bfba-4ff7-a6df-54d2236a1233-utilities\") pod \"redhat-operators-mqrgh\" (UID: \"edb21ff9-bfba-4ff7-a6df-54d2236a1233\") " pod="openshift-marketplace/redhat-operators-mqrgh" Feb 18 00:37:01 crc kubenswrapper[4791]: I0218 00:37:01.060124 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/edb21ff9-bfba-4ff7-a6df-54d2236a1233-catalog-content\") pod \"redhat-operators-mqrgh\" (UID: \"edb21ff9-bfba-4ff7-a6df-54d2236a1233\") " pod="openshift-marketplace/redhat-operators-mqrgh" Feb 18 00:37:01 crc kubenswrapper[4791]: I0218 00:37:01.081690 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lxgnb"] Feb 18 00:37:01 crc kubenswrapper[4791]: I0218 00:37:01.087917 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p4krc\" (UniqueName: \"kubernetes.io/projected/edb21ff9-bfba-4ff7-a6df-54d2236a1233-kube-api-access-p4krc\") pod \"redhat-operators-mqrgh\" (UID: \"edb21ff9-bfba-4ff7-a6df-54d2236a1233\") " pod="openshift-marketplace/redhat-operators-mqrgh" Feb 18 00:37:01 crc kubenswrapper[4791]: I0218 00:37:01.150447 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mqrgh" Feb 18 00:37:01 crc kubenswrapper[4791]: I0218 00:37:01.220858 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-l97cx"] Feb 18 00:37:01 crc kubenswrapper[4791]: I0218 00:37:01.222024 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-l97cx" Feb 18 00:37:01 crc kubenswrapper[4791]: I0218 00:37:01.251131 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-l97cx"] Feb 18 00:37:01 crc kubenswrapper[4791]: I0218 00:37:01.313605 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-tk98h" Feb 18 00:37:01 crc kubenswrapper[4791]: I0218 00:37:01.315144 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-plj59" Feb 18 00:37:01 crc kubenswrapper[4791]: I0218 00:37:01.324335 4791 patch_prober.go:28] interesting pod/router-default-5444994796-tk98h container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 18 00:37:01 crc kubenswrapper[4791]: [-]has-synced failed: reason withheld Feb 18 00:37:01 crc kubenswrapper[4791]: [+]process-running ok Feb 18 00:37:01 crc kubenswrapper[4791]: healthz check failed Feb 18 00:37:01 crc kubenswrapper[4791]: I0218 00:37:01.324382 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tk98h" podUID="5826b603-6616-49b5-a4d0-9dd63e715c9e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 18 00:37:01 crc kubenswrapper[4791]: I0218 00:37:01.369910 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a20a60a-9750-49bc-82b3-5d10ea3fc7f5-catalog-content\") pod \"redhat-operators-l97cx\" (UID: \"5a20a60a-9750-49bc-82b3-5d10ea3fc7f5\") " pod="openshift-marketplace/redhat-operators-l97cx" Feb 18 00:37:01 crc kubenswrapper[4791]: I0218 00:37:01.369959 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mpl25\" (UniqueName: \"kubernetes.io/projected/5a20a60a-9750-49bc-82b3-5d10ea3fc7f5-kube-api-access-mpl25\") pod \"redhat-operators-l97cx\" (UID: \"5a20a60a-9750-49bc-82b3-5d10ea3fc7f5\") " pod="openshift-marketplace/redhat-operators-l97cx" Feb 18 00:37:01 crc kubenswrapper[4791]: I0218 00:37:01.369984 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a20a60a-9750-49bc-82b3-5d10ea3fc7f5-utilities\") pod \"redhat-operators-l97cx\" (UID: \"5a20a60a-9750-49bc-82b3-5d10ea3fc7f5\") " pod="openshift-marketplace/redhat-operators-l97cx" Feb 18 00:37:01 crc kubenswrapper[4791]: I0218 00:37:01.397216 4791 generic.go:334] "Generic (PLEG): container finished" podID="f5747934-5c30-4b86-bfd5-03a8ad37c9bd" containerID="204d99a0637bdf2318b878a8276638894586673a3c8b1647aa677c102e48962d" exitCode=0 Feb 18 00:37:01 crc kubenswrapper[4791]: I0218 00:37:01.397367 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l5zp8" event={"ID":"f5747934-5c30-4b86-bfd5-03a8ad37c9bd","Type":"ContainerDied","Data":"204d99a0637bdf2318b878a8276638894586673a3c8b1647aa677c102e48962d"} Feb 18 00:37:01 crc kubenswrapper[4791]: I0218 00:37:01.397557 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l5zp8" 
event={"ID":"f5747934-5c30-4b86-bfd5-03a8ad37c9bd","Type":"ContainerStarted","Data":"a21bd7a9974d43391d23b817771c14e4f64240abee15da220981494ba671db44"} Feb 18 00:37:01 crc kubenswrapper[4791]: I0218 00:37:01.417491 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"487f84df-585a-4249-943e-60252e7118c4","Type":"ContainerStarted","Data":"b72d799e7aabd85d7a0df0a63bb1dda9c7287efd0b3326c3dd3192a93f6e5dbb"} Feb 18 00:37:01 crc kubenswrapper[4791]: I0218 00:37:01.425551 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"487f84df-585a-4249-943e-60252e7118c4","Type":"ContainerStarted","Data":"8a0fe7618c9917fdb949863d7a440f986db76cf1d43b2e3c0044779cc07dcbc8"} Feb 18 00:37:01 crc kubenswrapper[4791]: I0218 00:37:01.425668 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lxgnb" event={"ID":"6f5a9f53-3f5e-44d5-b322-fb0910613ad8","Type":"ContainerStarted","Data":"da2b20ad41a5e64b8ec03b93c84692003a418a50af9fe50e2b85083e417ad9db"} Feb 18 00:37:01 crc kubenswrapper[4791]: I0218 00:37:01.463700 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=2.463684111 podStartE2EDuration="2.463684111s" podCreationTimestamp="2026-02-18 00:36:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:37:01.462960166 +0000 UTC m=+163.030973336" watchObservedRunningTime="2026-02-18 00:37:01.463684111 +0000 UTC m=+163.031697281" Feb 18 00:37:01 crc kubenswrapper[4791]: I0218 00:37:01.472839 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a20a60a-9750-49bc-82b3-5d10ea3fc7f5-catalog-content\") pod \"redhat-operators-l97cx\" (UID: \"5a20a60a-9750-49bc-82b3-5d10ea3fc7f5\") " pod="openshift-marketplace/redhat-operators-l97cx" Feb 18 00:37:01 crc kubenswrapper[4791]: I0218 00:37:01.472889 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mpl25\" (UniqueName: \"kubernetes.io/projected/5a20a60a-9750-49bc-82b3-5d10ea3fc7f5-kube-api-access-mpl25\") pod \"redhat-operators-l97cx\" (UID: \"5a20a60a-9750-49bc-82b3-5d10ea3fc7f5\") " pod="openshift-marketplace/redhat-operators-l97cx" Feb 18 00:37:01 crc kubenswrapper[4791]: I0218 00:37:01.472911 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a20a60a-9750-49bc-82b3-5d10ea3fc7f5-utilities\") pod \"redhat-operators-l97cx\" (UID: \"5a20a60a-9750-49bc-82b3-5d10ea3fc7f5\") " pod="openshift-marketplace/redhat-operators-l97cx" Feb 18 00:37:01 crc kubenswrapper[4791]: I0218 00:37:01.473993 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a20a60a-9750-49bc-82b3-5d10ea3fc7f5-utilities\") pod \"redhat-operators-l97cx\" (UID: \"5a20a60a-9750-49bc-82b3-5d10ea3fc7f5\") " pod="openshift-marketplace/redhat-operators-l97cx" Feb 18 00:37:01 crc kubenswrapper[4791]: I0218 00:37:01.474492 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a20a60a-9750-49bc-82b3-5d10ea3fc7f5-catalog-content\") pod \"redhat-operators-l97cx\" 
(UID: \"5a20a60a-9750-49bc-82b3-5d10ea3fc7f5\") " pod="openshift-marketplace/redhat-operators-l97cx" Feb 18 00:37:01 crc kubenswrapper[4791]: I0218 00:37:01.498395 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mpl25\" (UniqueName: \"kubernetes.io/projected/5a20a60a-9750-49bc-82b3-5d10ea3fc7f5-kube-api-access-mpl25\") pod \"redhat-operators-l97cx\" (UID: \"5a20a60a-9750-49bc-82b3-5d10ea3fc7f5\") " pod="openshift-marketplace/redhat-operators-l97cx" Feb 18 00:37:01 crc kubenswrapper[4791]: I0218 00:37:01.584677 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-l97cx" Feb 18 00:37:01 crc kubenswrapper[4791]: I0218 00:37:01.707103 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mqrgh"] Feb 18 00:37:01 crc kubenswrapper[4791]: I0218 00:37:01.945744 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-l97cx"] Feb 18 00:37:01 crc kubenswrapper[4791]: W0218 00:37:01.969464 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5a20a60a_9750_49bc_82b3_5d10ea3fc7f5.slice/crio-7655117cb5e68f77392a25cd3d6fc53978c457c68739c1180d3e8ba5944fd69b WatchSource:0}: Error finding container 7655117cb5e68f77392a25cd3d6fc53978c457c68739c1180d3e8ba5944fd69b: Status 404 returned error can't find the container with id 7655117cb5e68f77392a25cd3d6fc53978c457c68739c1180d3e8ba5944fd69b Feb 18 00:37:02 crc kubenswrapper[4791]: I0218 00:37:02.316525 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-tk98h" Feb 18 00:37:02 crc kubenswrapper[4791]: I0218 00:37:02.319102 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-tk98h" Feb 18 00:37:02 crc kubenswrapper[4791]: I0218 00:37:02.441645 4791 generic.go:334] "Generic (PLEG): container finished" podID="6f5a9f53-3f5e-44d5-b322-fb0910613ad8" containerID="ed13f1e785f289b9ba942f72a321d1b6728673eaa1cdf1576d728734d1b643fd" exitCode=0 Feb 18 00:37:02 crc kubenswrapper[4791]: I0218 00:37:02.441742 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lxgnb" event={"ID":"6f5a9f53-3f5e-44d5-b322-fb0910613ad8","Type":"ContainerDied","Data":"ed13f1e785f289b9ba942f72a321d1b6728673eaa1cdf1576d728734d1b643fd"} Feb 18 00:37:02 crc kubenswrapper[4791]: I0218 00:37:02.446153 4791 generic.go:334] "Generic (PLEG): container finished" podID="5a20a60a-9750-49bc-82b3-5d10ea3fc7f5" containerID="a6b314c7a5cb0f88973f88db5e24b06234077e1cc66cae05f408b1b7f99a6cd0" exitCode=0 Feb 18 00:37:02 crc kubenswrapper[4791]: I0218 00:37:02.446239 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l97cx" event={"ID":"5a20a60a-9750-49bc-82b3-5d10ea3fc7f5","Type":"ContainerDied","Data":"a6b314c7a5cb0f88973f88db5e24b06234077e1cc66cae05f408b1b7f99a6cd0"} Feb 18 00:37:02 crc kubenswrapper[4791]: I0218 00:37:02.446271 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l97cx" event={"ID":"5a20a60a-9750-49bc-82b3-5d10ea3fc7f5","Type":"ContainerStarted","Data":"7655117cb5e68f77392a25cd3d6fc53978c457c68739c1180d3e8ba5944fd69b"} Feb 18 00:37:02 crc kubenswrapper[4791]: I0218 00:37:02.451172 4791 generic.go:334] "Generic (PLEG): container finished" 
podID="edb21ff9-bfba-4ff7-a6df-54d2236a1233" containerID="ac0e4f7a2017786360524f10e9274f18bfcb5fb18aa90d8f7025809faed105c7" exitCode=0 Feb 18 00:37:02 crc kubenswrapper[4791]: I0218 00:37:02.451258 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mqrgh" event={"ID":"edb21ff9-bfba-4ff7-a6df-54d2236a1233","Type":"ContainerDied","Data":"ac0e4f7a2017786360524f10e9274f18bfcb5fb18aa90d8f7025809faed105c7"} Feb 18 00:37:02 crc kubenswrapper[4791]: I0218 00:37:02.451291 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mqrgh" event={"ID":"edb21ff9-bfba-4ff7-a6df-54d2236a1233","Type":"ContainerStarted","Data":"a494bdf4351686227d3dcc4eb1b047146bb9742e5fcd8aa4b9f6bd9b555fdc68"} Feb 18 00:37:02 crc kubenswrapper[4791]: I0218 00:37:02.458316 4791 generic.go:334] "Generic (PLEG): container finished" podID="487f84df-585a-4249-943e-60252e7118c4" containerID="b72d799e7aabd85d7a0df0a63bb1dda9c7287efd0b3326c3dd3192a93f6e5dbb" exitCode=0 Feb 18 00:37:02 crc kubenswrapper[4791]: I0218 00:37:02.458475 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"487f84df-585a-4249-943e-60252e7118c4","Type":"ContainerDied","Data":"b72d799e7aabd85d7a0df0a63bb1dda9c7287efd0b3326c3dd3192a93f6e5dbb"} Feb 18 00:37:03 crc kubenswrapper[4791]: I0218 00:37:03.077334 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Feb 18 00:37:03 crc kubenswrapper[4791]: I0218 00:37:03.078576 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 18 00:37:03 crc kubenswrapper[4791]: I0218 00:37:03.081937 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Feb 18 00:37:03 crc kubenswrapper[4791]: I0218 00:37:03.083392 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Feb 18 00:37:03 crc kubenswrapper[4791]: I0218 00:37:03.091844 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Feb 18 00:37:03 crc kubenswrapper[4791]: I0218 00:37:03.202522 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a748d12a-1368-4c0c-83d3-1ff2beaa80ee-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"a748d12a-1368-4c0c-83d3-1ff2beaa80ee\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 18 00:37:03 crc kubenswrapper[4791]: I0218 00:37:03.202617 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a748d12a-1368-4c0c-83d3-1ff2beaa80ee-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"a748d12a-1368-4c0c-83d3-1ff2beaa80ee\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 18 00:37:03 crc kubenswrapper[4791]: I0218 00:37:03.303804 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a748d12a-1368-4c0c-83d3-1ff2beaa80ee-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"a748d12a-1368-4c0c-83d3-1ff2beaa80ee\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 18 00:37:03 crc kubenswrapper[4791]: I0218 00:37:03.303898 4791 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a748d12a-1368-4c0c-83d3-1ff2beaa80ee-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"a748d12a-1368-4c0c-83d3-1ff2beaa80ee\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 18 00:37:03 crc kubenswrapper[4791]: I0218 00:37:03.303952 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a748d12a-1368-4c0c-83d3-1ff2beaa80ee-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"a748d12a-1368-4c0c-83d3-1ff2beaa80ee\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 18 00:37:03 crc kubenswrapper[4791]: I0218 00:37:03.315460 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-vlrzr" Feb 18 00:37:03 crc kubenswrapper[4791]: I0218 00:37:03.331509 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a748d12a-1368-4c0c-83d3-1ff2beaa80ee-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"a748d12a-1368-4c0c-83d3-1ff2beaa80ee\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 18 00:37:03 crc kubenswrapper[4791]: I0218 00:37:03.402693 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 18 00:37:03 crc kubenswrapper[4791]: I0218 00:37:03.799226 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 18 00:37:03 crc kubenswrapper[4791]: I0218 00:37:03.917973 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/487f84df-585a-4249-943e-60252e7118c4-kubelet-dir\") pod \"487f84df-585a-4249-943e-60252e7118c4\" (UID: \"487f84df-585a-4249-943e-60252e7118c4\") " Feb 18 00:37:03 crc kubenswrapper[4791]: I0218 00:37:03.918044 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/487f84df-585a-4249-943e-60252e7118c4-kube-api-access\") pod \"487f84df-585a-4249-943e-60252e7118c4\" (UID: \"487f84df-585a-4249-943e-60252e7118c4\") " Feb 18 00:37:03 crc kubenswrapper[4791]: I0218 00:37:03.918105 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/487f84df-585a-4249-943e-60252e7118c4-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "487f84df-585a-4249-943e-60252e7118c4" (UID: "487f84df-585a-4249-943e-60252e7118c4"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:37:03 crc kubenswrapper[4791]: I0218 00:37:03.918326 4791 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/487f84df-585a-4249-943e-60252e7118c4-kubelet-dir\") on node \"crc\" DevicePath \"\"" Feb 18 00:37:03 crc kubenswrapper[4791]: I0218 00:37:03.922786 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/487f84df-585a-4249-943e-60252e7118c4-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "487f84df-585a-4249-943e-60252e7118c4" (UID: "487f84df-585a-4249-943e-60252e7118c4"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:37:03 crc kubenswrapper[4791]: I0218 00:37:03.990837 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Feb 18 00:37:04 crc kubenswrapper[4791]: I0218 00:37:04.019510 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/487f84df-585a-4249-943e-60252e7118c4-kube-api-access\") on node \"crc\" DevicePath \"\"" Feb 18 00:37:04 crc kubenswrapper[4791]: W0218 00:37:04.019866 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-poda748d12a_1368_4c0c_83d3_1ff2beaa80ee.slice/crio-2a84c5c5e58f127f675044fc80e268348996ab6588095136f08b713dbd5cf977 WatchSource:0}: Error finding container 2a84c5c5e58f127f675044fc80e268348996ab6588095136f08b713dbd5cf977: Status 404 returned error can't find the container with id 2a84c5c5e58f127f675044fc80e268348996ab6588095136f08b713dbd5cf977 Feb 18 00:37:04 crc kubenswrapper[4791]: I0218 00:37:04.508959 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"a748d12a-1368-4c0c-83d3-1ff2beaa80ee","Type":"ContainerStarted","Data":"2a84c5c5e58f127f675044fc80e268348996ab6588095136f08b713dbd5cf977"} Feb 18 00:37:04 crc kubenswrapper[4791]: I0218 00:37:04.516581 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"487f84df-585a-4249-943e-60252e7118c4","Type":"ContainerDied","Data":"8a0fe7618c9917fdb949863d7a440f986db76cf1d43b2e3c0044779cc07dcbc8"} Feb 18 00:37:04 crc kubenswrapper[4791]: I0218 00:37:04.516624 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8a0fe7618c9917fdb949863d7a440f986db76cf1d43b2e3c0044779cc07dcbc8" Feb 18 00:37:04 crc kubenswrapper[4791]: I0218 00:37:04.516638 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 18 00:37:05 crc kubenswrapper[4791]: I0218 00:37:05.527925 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"a748d12a-1368-4c0c-83d3-1ff2beaa80ee","Type":"ContainerStarted","Data":"45ee8b06f2d4c3e3fb04f40419bd71a6d7cce6724915fcca468ac6e01da93b68"} Feb 18 00:37:05 crc kubenswrapper[4791]: I0218 00:37:05.549329 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=2.549315088 podStartE2EDuration="2.549315088s" podCreationTimestamp="2026-02-18 00:37:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:37:05.548412329 +0000 UTC m=+167.116425499" watchObservedRunningTime="2026-02-18 00:37:05.549315088 +0000 UTC m=+167.117328258" Feb 18 00:37:06 crc kubenswrapper[4791]: I0218 00:37:06.537600 4791 generic.go:334] "Generic (PLEG): container finished" podID="a748d12a-1368-4c0c-83d3-1ff2beaa80ee" containerID="45ee8b06f2d4c3e3fb04f40419bd71a6d7cce6724915fcca468ac6e01da93b68" exitCode=0 Feb 18 00:37:06 crc kubenswrapper[4791]: I0218 00:37:06.537642 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"a748d12a-1368-4c0c-83d3-1ff2beaa80ee","Type":"ContainerDied","Data":"45ee8b06f2d4c3e3fb04f40419bd71a6d7cce6724915fcca468ac6e01da93b68"} Feb 18 00:37:08 crc kubenswrapper[4791]: I0218 00:37:08.496328 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/afcf9ee1-4224-441c-a98d-9330bed34065-metrics-certs\") pod \"network-metrics-daemon-jq75l\" (UID: \"afcf9ee1-4224-441c-a98d-9330bed34065\") " pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:37:08 crc kubenswrapper[4791]: I0218 00:37:08.509139 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/afcf9ee1-4224-441c-a98d-9330bed34065-metrics-certs\") pod \"network-metrics-daemon-jq75l\" (UID: \"afcf9ee1-4224-441c-a98d-9330bed34065\") " pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:37:08 crc kubenswrapper[4791]: I0218 00:37:08.726150 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jq75l" Feb 18 00:37:10 crc kubenswrapper[4791]: I0218 00:37:10.807530 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-qj494" Feb 18 00:37:10 crc kubenswrapper[4791]: I0218 00:37:10.872995 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-rxhdk" Feb 18 00:37:10 crc kubenswrapper[4791]: I0218 00:37:10.887523 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-rxhdk" Feb 18 00:37:17 crc kubenswrapper[4791]: I0218 00:37:17.083224 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 18 00:37:17 crc kubenswrapper[4791]: I0218 00:37:17.165990 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 18 00:37:17 crc kubenswrapper[4791]: I0218 00:37:17.209484 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a748d12a-1368-4c0c-83d3-1ff2beaa80ee-kubelet-dir\") pod \"a748d12a-1368-4c0c-83d3-1ff2beaa80ee\" (UID: \"a748d12a-1368-4c0c-83d3-1ff2beaa80ee\") " Feb 18 00:37:17 crc kubenswrapper[4791]: I0218 00:37:17.209842 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a748d12a-1368-4c0c-83d3-1ff2beaa80ee-kube-api-access\") pod \"a748d12a-1368-4c0c-83d3-1ff2beaa80ee\" (UID: \"a748d12a-1368-4c0c-83d3-1ff2beaa80ee\") " Feb 18 00:37:17 crc kubenswrapper[4791]: I0218 00:37:17.209645 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a748d12a-1368-4c0c-83d3-1ff2beaa80ee-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "a748d12a-1368-4c0c-83d3-1ff2beaa80ee" (UID: "a748d12a-1368-4c0c-83d3-1ff2beaa80ee"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:37:17 crc kubenswrapper[4791]: I0218 00:37:17.210319 4791 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a748d12a-1368-4c0c-83d3-1ff2beaa80ee-kubelet-dir\") on node \"crc\" DevicePath \"\"" Feb 18 00:37:17 crc kubenswrapper[4791]: I0218 00:37:17.224198 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a748d12a-1368-4c0c-83d3-1ff2beaa80ee-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "a748d12a-1368-4c0c-83d3-1ff2beaa80ee" (UID: "a748d12a-1368-4c0c-83d3-1ff2beaa80ee"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:37:17 crc kubenswrapper[4791]: I0218 00:37:17.311851 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a748d12a-1368-4c0c-83d3-1ff2beaa80ee-kube-api-access\") on node \"crc\" DevicePath \"\"" Feb 18 00:37:17 crc kubenswrapper[4791]: I0218 00:37:17.598618 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"a748d12a-1368-4c0c-83d3-1ff2beaa80ee","Type":"ContainerDied","Data":"2a84c5c5e58f127f675044fc80e268348996ab6588095136f08b713dbd5cf977"} Feb 18 00:37:17 crc kubenswrapper[4791]: I0218 00:37:17.598652 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2a84c5c5e58f127f675044fc80e268348996ab6588095136f08b713dbd5cf977" Feb 18 00:37:17 crc kubenswrapper[4791]: I0218 00:37:17.598704 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 18 00:37:18 crc kubenswrapper[4791]: I0218 00:37:18.538276 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:37:24 crc kubenswrapper[4791]: I0218 00:37:24.671022 4791 generic.go:334] "Generic (PLEG): container finished" podID="da9a0a6f-10c0-4fa0-8417-642ff4194832" containerID="e4a7db5ed63ade2524a1403f103e2253affe7f9a323aa09490dba5056eaf5962" exitCode=0 Feb 18 00:37:24 crc kubenswrapper[4791]: I0218 00:37:24.671131 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-pruner-29522880-928kh" event={"ID":"da9a0a6f-10c0-4fa0-8417-642ff4194832","Type":"ContainerDied","Data":"e4a7db5ed63ade2524a1403f103e2253affe7f9a323aa09490dba5056eaf5962"} Feb 18 00:37:26 crc kubenswrapper[4791]: I0218 00:37:26.799996 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 00:37:26 crc kubenswrapper[4791]: I0218 00:37:26.800497 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 00:37:27 crc kubenswrapper[4791]: E0218 00:37:27.709721 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Feb 18 00:37:27 crc kubenswrapper[4791]: E0218 00:37:27.710110 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2fvnj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-2gl6m_openshift-marketplace(12887847-f4a6-4fed-b6b5-e30a9c494948): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Feb 18 00:37:27 crc kubenswrapper[4791]: E0218 00:37:27.711578 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-2gl6m" podUID="12887847-f4a6-4fed-b6b5-e30a9c494948" Feb 18 00:37:30 crc kubenswrapper[4791]: I0218 00:37:30.886815 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-k8nrp" Feb 18 00:37:30 crc kubenswrapper[4791]: E0218 00:37:30.941857 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Feb 18 00:37:30 crc kubenswrapper[4791]: E0218 00:37:30.942073 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-b4zch,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-cp28s_openshift-marketplace(719d62e6-1ac9-497f-b889-d2ee84c621d1): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Feb 18 00:37:30 crc kubenswrapper[4791]: E0218 00:37:30.944422 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-cp28s" podUID="719d62e6-1ac9-497f-b889-d2ee84c621d1" Feb 18 00:37:32 crc kubenswrapper[4791]: E0218 00:37:32.619495 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-2gl6m" podUID="12887847-f4a6-4fed-b6b5-e30a9c494948" Feb 18 00:37:32 crc kubenswrapper[4791]: E0218 00:37:32.619826 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-cp28s" podUID="719d62e6-1ac9-497f-b889-d2ee84c621d1" Feb 18 00:37:32 crc kubenswrapper[4791]: E0218 00:37:32.841415 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Feb 18 00:37:32 crc kubenswrapper[4791]: E0218 00:37:32.841588 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-p4krc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-mqrgh_openshift-marketplace(edb21ff9-bfba-4ff7-a6df-54d2236a1233): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Feb 18 00:37:32 crc kubenswrapper[4791]: E0218 00:37:32.843834 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-mqrgh" podUID="edb21ff9-bfba-4ff7-a6df-54d2236a1233" Feb 18 00:37:34 crc kubenswrapper[4791]: E0218 00:37:34.850365 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-mqrgh" podUID="edb21ff9-bfba-4ff7-a6df-54d2236a1233" Feb 18 00:37:34 crc kubenswrapper[4791]: I0218 00:37:34.911109 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-pruner-29522880-928kh" Feb 18 00:37:35 crc kubenswrapper[4791]: I0218 00:37:35.048209 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/da9a0a6f-10c0-4fa0-8417-642ff4194832-serviceca\") pod \"da9a0a6f-10c0-4fa0-8417-642ff4194832\" (UID: \"da9a0a6f-10c0-4fa0-8417-642ff4194832\") " Feb 18 00:37:35 crc kubenswrapper[4791]: I0218 00:37:35.048369 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-26hsc\" (UniqueName: \"kubernetes.io/projected/da9a0a6f-10c0-4fa0-8417-642ff4194832-kube-api-access-26hsc\") pod \"da9a0a6f-10c0-4fa0-8417-642ff4194832\" (UID: \"da9a0a6f-10c0-4fa0-8417-642ff4194832\") " Feb 18 00:37:35 crc kubenswrapper[4791]: I0218 00:37:35.049029 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/da9a0a6f-10c0-4fa0-8417-642ff4194832-serviceca" (OuterVolumeSpecName: "serviceca") pod "da9a0a6f-10c0-4fa0-8417-642ff4194832" (UID: "da9a0a6f-10c0-4fa0-8417-642ff4194832"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:37:35 crc kubenswrapper[4791]: I0218 00:37:35.053844 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da9a0a6f-10c0-4fa0-8417-642ff4194832-kube-api-access-26hsc" (OuterVolumeSpecName: "kube-api-access-26hsc") pod "da9a0a6f-10c0-4fa0-8417-642ff4194832" (UID: "da9a0a6f-10c0-4fa0-8417-642ff4194832"). InnerVolumeSpecName "kube-api-access-26hsc". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:37:35 crc kubenswrapper[4791]: I0218 00:37:35.149388 4791 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/da9a0a6f-10c0-4fa0-8417-642ff4194832-serviceca\") on node \"crc\" DevicePath \"\"" Feb 18 00:37:35 crc kubenswrapper[4791]: I0218 00:37:35.149428 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-26hsc\" (UniqueName: \"kubernetes.io/projected/da9a0a6f-10c0-4fa0-8417-642ff4194832-kube-api-access-26hsc\") on node \"crc\" DevicePath \"\"" Feb 18 00:37:35 crc kubenswrapper[4791]: I0218 00:37:35.729326 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-pruner-29522880-928kh" event={"ID":"da9a0a6f-10c0-4fa0-8417-642ff4194832","Type":"ContainerDied","Data":"8500f196330df72d58e1a8487943e12656f1493ca43dcf03fcb178b7061a18b4"} Feb 18 00:37:35 crc kubenswrapper[4791]: I0218 00:37:35.729567 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8500f196330df72d58e1a8487943e12656f1493ca43dcf03fcb178b7061a18b4" Feb 18 00:37:35 crc kubenswrapper[4791]: I0218 00:37:35.729367 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-pruner-29522880-928kh" Feb 18 00:37:36 crc kubenswrapper[4791]: E0218 00:37:35.965832 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Feb 18 00:37:36 crc kubenswrapper[4791]: E0218 00:37:35.966200 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fbmr8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-mklvx_openshift-marketplace(14300052-284f-4b0c-8238-13ea3a9ddb6a): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Feb 18 00:37:36 crc kubenswrapper[4791]: E0218 00:37:35.967550 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-mklvx" podUID="14300052-284f-4b0c-8238-13ea3a9ddb6a" Feb 18 00:37:36 crc kubenswrapper[4791]: I0218 00:37:36.318191 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-jq75l"] Feb 18 00:37:36 crc kubenswrapper[4791]: W0218 00:37:36.415851 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podafcf9ee1_4224_441c_a98d_9330bed34065.slice/crio-d2a3b525d40053faf4cf863e5423c09d3d3407abc644eb26e12d20ab672fa6d6 WatchSource:0}: Error finding container d2a3b525d40053faf4cf863e5423c09d3d3407abc644eb26e12d20ab672fa6d6: Status 404 returned error can't find the container with id d2a3b525d40053faf4cf863e5423c09d3d3407abc644eb26e12d20ab672fa6d6 Feb 18 00:37:36 crc kubenswrapper[4791]: E0218 00:37:36.525268 4791 log.go:32] "PullImage from image service failed" err="rpc error: 
code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Feb 18 00:37:36 crc kubenswrapper[4791]: E0218 00:37:36.525530 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mpl25,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-l97cx_openshift-marketplace(5a20a60a-9750-49bc-82b3-5d10ea3fc7f5): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Feb 18 00:37:36 crc kubenswrapper[4791]: E0218 00:37:36.526676 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-l97cx" podUID="5a20a60a-9750-49bc-82b3-5d10ea3fc7f5" Feb 18 00:37:36 crc kubenswrapper[4791]: E0218 00:37:36.606188 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Feb 18 00:37:36 crc kubenswrapper[4791]: E0218 00:37:36.606548 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hdf7f,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-9svms_openshift-marketplace(34fbd2ce-81ba-4a98-ba90-982996dec809): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Feb 18 00:37:36 crc kubenswrapper[4791]: E0218 00:37:36.607715 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-9svms" podUID="34fbd2ce-81ba-4a98-ba90-982996dec809" Feb 18 00:37:36 crc kubenswrapper[4791]: I0218 00:37:36.736506 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lxgnb" event={"ID":"6f5a9f53-3f5e-44d5-b322-fb0910613ad8","Type":"ContainerStarted","Data":"887e1b0a08c4f0089d48eee8713c2ccd0207f520054d2eb7749bc050e59858fd"} Feb 18 00:37:36 crc kubenswrapper[4791]: I0218 00:37:36.739325 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-jq75l" event={"ID":"afcf9ee1-4224-441c-a98d-9330bed34065","Type":"ContainerStarted","Data":"1d89eaa97513be7d27606e55e2f6e461ae11e7cf9d66141119e36e569862cdcd"} Feb 18 00:37:36 crc kubenswrapper[4791]: I0218 00:37:36.739389 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-jq75l" event={"ID":"afcf9ee1-4224-441c-a98d-9330bed34065","Type":"ContainerStarted","Data":"d2a3b525d40053faf4cf863e5423c09d3d3407abc644eb26e12d20ab672fa6d6"} Feb 18 00:37:36 crc kubenswrapper[4791]: I0218 00:37:36.748093 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l5zp8" event={"ID":"f5747934-5c30-4b86-bfd5-03a8ad37c9bd","Type":"ContainerStarted","Data":"57c294bc09c7f60d4d87138e2aeb5c3b2ce403e088f8a1d5c4f1ec61532966eb"} Feb 18 00:37:36 crc kubenswrapper[4791]: E0218 00:37:36.748720 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image 
\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-9svms" podUID="34fbd2ce-81ba-4a98-ba90-982996dec809" Feb 18 00:37:36 crc kubenswrapper[4791]: E0218 00:37:36.749509 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-mklvx" podUID="14300052-284f-4b0c-8238-13ea3a9ddb6a" Feb 18 00:37:36 crc kubenswrapper[4791]: E0218 00:37:36.752461 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-l97cx" podUID="5a20a60a-9750-49bc-82b3-5d10ea3fc7f5" Feb 18 00:37:37 crc kubenswrapper[4791]: I0218 00:37:37.755431 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-jq75l" event={"ID":"afcf9ee1-4224-441c-a98d-9330bed34065","Type":"ContainerStarted","Data":"0750e3d470fdb0496d4d0187ce7e510766cd2fe2c1fc0b836a1b86795ccc648d"} Feb 18 00:37:37 crc kubenswrapper[4791]: I0218 00:37:37.757845 4791 generic.go:334] "Generic (PLEG): container finished" podID="f5747934-5c30-4b86-bfd5-03a8ad37c9bd" containerID="57c294bc09c7f60d4d87138e2aeb5c3b2ce403e088f8a1d5c4f1ec61532966eb" exitCode=0 Feb 18 00:37:37 crc kubenswrapper[4791]: I0218 00:37:37.757943 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l5zp8" event={"ID":"f5747934-5c30-4b86-bfd5-03a8ad37c9bd","Type":"ContainerDied","Data":"57c294bc09c7f60d4d87138e2aeb5c3b2ce403e088f8a1d5c4f1ec61532966eb"} Feb 18 00:37:37 crc kubenswrapper[4791]: I0218 00:37:37.758028 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l5zp8" event={"ID":"f5747934-5c30-4b86-bfd5-03a8ad37c9bd","Type":"ContainerStarted","Data":"56755343ab669ca830c1e822719b7597531411a842b5b2cb39d4ae4bda6374fd"} Feb 18 00:37:37 crc kubenswrapper[4791]: I0218 00:37:37.763738 4791 generic.go:334] "Generic (PLEG): container finished" podID="6f5a9f53-3f5e-44d5-b322-fb0910613ad8" containerID="887e1b0a08c4f0089d48eee8713c2ccd0207f520054d2eb7749bc050e59858fd" exitCode=0 Feb 18 00:37:37 crc kubenswrapper[4791]: I0218 00:37:37.763793 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lxgnb" event={"ID":"6f5a9f53-3f5e-44d5-b322-fb0910613ad8","Type":"ContainerDied","Data":"887e1b0a08c4f0089d48eee8713c2ccd0207f520054d2eb7749bc050e59858fd"} Feb 18 00:37:37 crc kubenswrapper[4791]: I0218 00:37:37.775481 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-jq75l" podStartSLOduration=172.775456721 podStartE2EDuration="2m52.775456721s" podCreationTimestamp="2026-02-18 00:34:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:37:37.770091253 +0000 UTC m=+199.338104423" watchObservedRunningTime="2026-02-18 00:37:37.775456721 +0000 UTC m=+199.343469911" Feb 18 00:37:37 crc kubenswrapper[4791]: I0218 00:37:37.799509 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-l5zp8" podStartSLOduration=3.062699692 
podStartE2EDuration="38.799484117s" podCreationTimestamp="2026-02-18 00:36:59 +0000 UTC" firstStartedPulling="2026-02-18 00:37:01.415564637 +0000 UTC m=+162.983577807" lastFinishedPulling="2026-02-18 00:37:37.152349062 +0000 UTC m=+198.720362232" observedRunningTime="2026-02-18 00:37:37.794198992 +0000 UTC m=+199.362212172" watchObservedRunningTime="2026-02-18 00:37:37.799484117 +0000 UTC m=+199.367497287" Feb 18 00:37:38 crc kubenswrapper[4791]: I0218 00:37:38.267074 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Feb 18 00:37:38 crc kubenswrapper[4791]: E0218 00:37:38.267818 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="487f84df-585a-4249-943e-60252e7118c4" containerName="pruner" Feb 18 00:37:38 crc kubenswrapper[4791]: I0218 00:37:38.267839 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="487f84df-585a-4249-943e-60252e7118c4" containerName="pruner" Feb 18 00:37:38 crc kubenswrapper[4791]: E0218 00:37:38.267848 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da9a0a6f-10c0-4fa0-8417-642ff4194832" containerName="image-pruner" Feb 18 00:37:38 crc kubenswrapper[4791]: I0218 00:37:38.267854 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="da9a0a6f-10c0-4fa0-8417-642ff4194832" containerName="image-pruner" Feb 18 00:37:38 crc kubenswrapper[4791]: E0218 00:37:38.267866 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a748d12a-1368-4c0c-83d3-1ff2beaa80ee" containerName="pruner" Feb 18 00:37:38 crc kubenswrapper[4791]: I0218 00:37:38.267872 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="a748d12a-1368-4c0c-83d3-1ff2beaa80ee" containerName="pruner" Feb 18 00:37:38 crc kubenswrapper[4791]: I0218 00:37:38.267955 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="a748d12a-1368-4c0c-83d3-1ff2beaa80ee" containerName="pruner" Feb 18 00:37:38 crc kubenswrapper[4791]: I0218 00:37:38.267965 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="487f84df-585a-4249-943e-60252e7118c4" containerName="pruner" Feb 18 00:37:38 crc kubenswrapper[4791]: I0218 00:37:38.267976 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="da9a0a6f-10c0-4fa0-8417-642ff4194832" containerName="image-pruner" Feb 18 00:37:38 crc kubenswrapper[4791]: I0218 00:37:38.268320 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 18 00:37:38 crc kubenswrapper[4791]: I0218 00:37:38.271421 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Feb 18 00:37:38 crc kubenswrapper[4791]: I0218 00:37:38.271826 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Feb 18 00:37:38 crc kubenswrapper[4791]: I0218 00:37:38.286054 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Feb 18 00:37:38 crc kubenswrapper[4791]: I0218 00:37:38.388083 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/eb86362a-90c0-4695-87c6-dff832957383-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"eb86362a-90c0-4695-87c6-dff832957383\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 18 00:37:38 crc kubenswrapper[4791]: I0218 00:37:38.388126 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/eb86362a-90c0-4695-87c6-dff832957383-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"eb86362a-90c0-4695-87c6-dff832957383\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 18 00:37:38 crc kubenswrapper[4791]: I0218 00:37:38.488890 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/eb86362a-90c0-4695-87c6-dff832957383-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"eb86362a-90c0-4695-87c6-dff832957383\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 18 00:37:38 crc kubenswrapper[4791]: I0218 00:37:38.489216 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/eb86362a-90c0-4695-87c6-dff832957383-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"eb86362a-90c0-4695-87c6-dff832957383\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 18 00:37:38 crc kubenswrapper[4791]: I0218 00:37:38.489730 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/eb86362a-90c0-4695-87c6-dff832957383-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"eb86362a-90c0-4695-87c6-dff832957383\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 18 00:37:38 crc kubenswrapper[4791]: I0218 00:37:38.516097 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/eb86362a-90c0-4695-87c6-dff832957383-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"eb86362a-90c0-4695-87c6-dff832957383\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 18 00:37:38 crc kubenswrapper[4791]: I0218 00:37:38.590432 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 18 00:37:38 crc kubenswrapper[4791]: I0218 00:37:38.779588 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lxgnb" event={"ID":"6f5a9f53-3f5e-44d5-b322-fb0910613ad8","Type":"ContainerStarted","Data":"ec7684d880b49b16be81a413a89153f8d5cac1578358f6da6301574df356c664"} Feb 18 00:37:38 crc kubenswrapper[4791]: I0218 00:37:38.835991 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-lxgnb" podStartSLOduration=3.136827822 podStartE2EDuration="38.835967816s" podCreationTimestamp="2026-02-18 00:37:00 +0000 UTC" firstStartedPulling="2026-02-18 00:37:02.444348102 +0000 UTC m=+164.012361272" lastFinishedPulling="2026-02-18 00:37:38.143488096 +0000 UTC m=+199.711501266" observedRunningTime="2026-02-18 00:37:38.804190524 +0000 UTC m=+200.372203704" watchObservedRunningTime="2026-02-18 00:37:38.835967816 +0000 UTC m=+200.403980976" Feb 18 00:37:38 crc kubenswrapper[4791]: I0218 00:37:38.838493 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Feb 18 00:37:39 crc kubenswrapper[4791]: I0218 00:37:39.785926 4791 generic.go:334] "Generic (PLEG): container finished" podID="eb86362a-90c0-4695-87c6-dff832957383" containerID="6cdf7b8f525e0f3a41271997ee7ab544862b49be1c8cb0999697fa88e110ef8b" exitCode=0 Feb 18 00:37:39 crc kubenswrapper[4791]: I0218 00:37:39.785986 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"eb86362a-90c0-4695-87c6-dff832957383","Type":"ContainerDied","Data":"6cdf7b8f525e0f3a41271997ee7ab544862b49be1c8cb0999697fa88e110ef8b"} Feb 18 00:37:39 crc kubenswrapper[4791]: I0218 00:37:39.786237 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"eb86362a-90c0-4695-87c6-dff832957383","Type":"ContainerStarted","Data":"c7da16058e7e34318dfa64bae04bb4d08a9a29b36c08ac131a8ba662f7696491"} Feb 18 00:37:40 crc kubenswrapper[4791]: I0218 00:37:40.130639 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-l5zp8" Feb 18 00:37:40 crc kubenswrapper[4791]: I0218 00:37:40.130694 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-l5zp8" Feb 18 00:37:40 crc kubenswrapper[4791]: I0218 00:37:40.287458 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-l5zp8" Feb 18 00:37:40 crc kubenswrapper[4791]: I0218 00:37:40.559568 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-lxgnb" Feb 18 00:37:40 crc kubenswrapper[4791]: I0218 00:37:40.559920 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-lxgnb" Feb 18 00:37:40 crc kubenswrapper[4791]: I0218 00:37:40.623936 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-lxgnb" Feb 18 00:37:41 crc kubenswrapper[4791]: I0218 00:37:41.078148 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 18 00:37:41 crc kubenswrapper[4791]: I0218 00:37:41.227552 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/eb86362a-90c0-4695-87c6-dff832957383-kube-api-access\") pod \"eb86362a-90c0-4695-87c6-dff832957383\" (UID: \"eb86362a-90c0-4695-87c6-dff832957383\") " Feb 18 00:37:41 crc kubenswrapper[4791]: I0218 00:37:41.227629 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/eb86362a-90c0-4695-87c6-dff832957383-kubelet-dir\") pod \"eb86362a-90c0-4695-87c6-dff832957383\" (UID: \"eb86362a-90c0-4695-87c6-dff832957383\") " Feb 18 00:37:41 crc kubenswrapper[4791]: I0218 00:37:41.228368 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/eb86362a-90c0-4695-87c6-dff832957383-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "eb86362a-90c0-4695-87c6-dff832957383" (UID: "eb86362a-90c0-4695-87c6-dff832957383"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:37:41 crc kubenswrapper[4791]: I0218 00:37:41.259870 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb86362a-90c0-4695-87c6-dff832957383-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "eb86362a-90c0-4695-87c6-dff832957383" (UID: "eb86362a-90c0-4695-87c6-dff832957383"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:37:41 crc kubenswrapper[4791]: I0218 00:37:41.329697 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/eb86362a-90c0-4695-87c6-dff832957383-kube-api-access\") on node \"crc\" DevicePath \"\"" Feb 18 00:37:41 crc kubenswrapper[4791]: I0218 00:37:41.329731 4791 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/eb86362a-90c0-4695-87c6-dff832957383-kubelet-dir\") on node \"crc\" DevicePath \"\"" Feb 18 00:37:41 crc kubenswrapper[4791]: I0218 00:37:41.815949 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 18 00:37:41 crc kubenswrapper[4791]: I0218 00:37:41.820522 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"eb86362a-90c0-4695-87c6-dff832957383","Type":"ContainerDied","Data":"c7da16058e7e34318dfa64bae04bb4d08a9a29b36c08ac131a8ba662f7696491"} Feb 18 00:37:41 crc kubenswrapper[4791]: I0218 00:37:41.820626 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c7da16058e7e34318dfa64bae04bb4d08a9a29b36c08ac131a8ba662f7696491" Feb 18 00:37:43 crc kubenswrapper[4791]: I0218 00:37:43.078494 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Feb 18 00:37:43 crc kubenswrapper[4791]: E0218 00:37:43.078738 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb86362a-90c0-4695-87c6-dff832957383" containerName="pruner" Feb 18 00:37:43 crc kubenswrapper[4791]: I0218 00:37:43.078754 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb86362a-90c0-4695-87c6-dff832957383" containerName="pruner" Feb 18 00:37:43 crc kubenswrapper[4791]: I0218 00:37:43.078889 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb86362a-90c0-4695-87c6-dff832957383" containerName="pruner" Feb 18 00:37:43 crc kubenswrapper[4791]: I0218 00:37:43.079352 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Feb 18 00:37:43 crc kubenswrapper[4791]: I0218 00:37:43.081289 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Feb 18 00:37:43 crc kubenswrapper[4791]: I0218 00:37:43.082857 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Feb 18 00:37:43 crc kubenswrapper[4791]: I0218 00:37:43.086343 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Feb 18 00:37:43 crc kubenswrapper[4791]: I0218 00:37:43.256654 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3fd8eea0-19ca-4527-9a36-2d7cd67fba45-kubelet-dir\") pod \"installer-9-crc\" (UID: \"3fd8eea0-19ca-4527-9a36-2d7cd67fba45\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 18 00:37:43 crc kubenswrapper[4791]: I0218 00:37:43.256712 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3fd8eea0-19ca-4527-9a36-2d7cd67fba45-kube-api-access\") pod \"installer-9-crc\" (UID: \"3fd8eea0-19ca-4527-9a36-2d7cd67fba45\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 18 00:37:43 crc kubenswrapper[4791]: I0218 00:37:43.256792 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/3fd8eea0-19ca-4527-9a36-2d7cd67fba45-var-lock\") pod \"installer-9-crc\" (UID: \"3fd8eea0-19ca-4527-9a36-2d7cd67fba45\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 18 00:37:43 crc kubenswrapper[4791]: I0218 00:37:43.357918 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3fd8eea0-19ca-4527-9a36-2d7cd67fba45-kubelet-dir\") pod \"installer-9-crc\" (UID: 
\"3fd8eea0-19ca-4527-9a36-2d7cd67fba45\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 18 00:37:43 crc kubenswrapper[4791]: I0218 00:37:43.357959 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3fd8eea0-19ca-4527-9a36-2d7cd67fba45-kube-api-access\") pod \"installer-9-crc\" (UID: \"3fd8eea0-19ca-4527-9a36-2d7cd67fba45\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 18 00:37:43 crc kubenswrapper[4791]: I0218 00:37:43.358015 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/3fd8eea0-19ca-4527-9a36-2d7cd67fba45-var-lock\") pod \"installer-9-crc\" (UID: \"3fd8eea0-19ca-4527-9a36-2d7cd67fba45\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 18 00:37:43 crc kubenswrapper[4791]: I0218 00:37:43.358011 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3fd8eea0-19ca-4527-9a36-2d7cd67fba45-kubelet-dir\") pod \"installer-9-crc\" (UID: \"3fd8eea0-19ca-4527-9a36-2d7cd67fba45\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 18 00:37:43 crc kubenswrapper[4791]: I0218 00:37:43.358075 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/3fd8eea0-19ca-4527-9a36-2d7cd67fba45-var-lock\") pod \"installer-9-crc\" (UID: \"3fd8eea0-19ca-4527-9a36-2d7cd67fba45\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 18 00:37:43 crc kubenswrapper[4791]: I0218 00:37:43.375765 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3fd8eea0-19ca-4527-9a36-2d7cd67fba45-kube-api-access\") pod \"installer-9-crc\" (UID: \"3fd8eea0-19ca-4527-9a36-2d7cd67fba45\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 18 00:37:43 crc kubenswrapper[4791]: I0218 00:37:43.404845 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Feb 18 00:37:43 crc kubenswrapper[4791]: I0218 00:37:43.911702 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Feb 18 00:37:43 crc kubenswrapper[4791]: W0218 00:37:43.924390 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod3fd8eea0_19ca_4527_9a36_2d7cd67fba45.slice/crio-2b77534550452c38b532dca33d90e358b3dec048557d8e58d4545ae580b19fb9 WatchSource:0}: Error finding container 2b77534550452c38b532dca33d90e358b3dec048557d8e58d4545ae580b19fb9: Status 404 returned error can't find the container with id 2b77534550452c38b532dca33d90e358b3dec048557d8e58d4545ae580b19fb9 Feb 18 00:37:44 crc kubenswrapper[4791]: I0218 00:37:44.852771 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"3fd8eea0-19ca-4527-9a36-2d7cd67fba45","Type":"ContainerStarted","Data":"8cf7da1bf157d81bb8aa1be96a661711d9e33d35942f2a7466eb0fc1b61daa06"} Feb 18 00:37:44 crc kubenswrapper[4791]: I0218 00:37:44.853246 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"3fd8eea0-19ca-4527-9a36-2d7cd67fba45","Type":"ContainerStarted","Data":"2b77534550452c38b532dca33d90e358b3dec048557d8e58d4545ae580b19fb9"} Feb 18 00:37:44 crc kubenswrapper[4791]: I0218 00:37:44.874098 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=1.874072678 podStartE2EDuration="1.874072678s" podCreationTimestamp="2026-02-18 00:37:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:37:44.868656284 +0000 UTC m=+206.436669534" watchObservedRunningTime="2026-02-18 00:37:44.874072678 +0000 UTC m=+206.442085888" Feb 18 00:37:46 crc kubenswrapper[4791]: I0218 00:37:46.870424 4791 generic.go:334] "Generic (PLEG): container finished" podID="12887847-f4a6-4fed-b6b5-e30a9c494948" containerID="734590c8dc13d6754d66cd588a67f43d2aec6f2565f6ca489831e5f87c317494" exitCode=0 Feb 18 00:37:46 crc kubenswrapper[4791]: I0218 00:37:46.870624 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2gl6m" event={"ID":"12887847-f4a6-4fed-b6b5-e30a9c494948","Type":"ContainerDied","Data":"734590c8dc13d6754d66cd588a67f43d2aec6f2565f6ca489831e5f87c317494"} Feb 18 00:37:48 crc kubenswrapper[4791]: I0218 00:37:48.879066 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cp28s" event={"ID":"719d62e6-1ac9-497f-b889-d2ee84c621d1","Type":"ContainerStarted","Data":"ccd3c0147f54023337d87ccbeb1eac61ea940efcc4cac3bceaeb9c9e46d59968"} Feb 18 00:37:48 crc kubenswrapper[4791]: I0218 00:37:48.881033 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2gl6m" event={"ID":"12887847-f4a6-4fed-b6b5-e30a9c494948","Type":"ContainerStarted","Data":"17b3e17fce331a11c40c6b6bce04891676fc9a31922da50eaaad1525b9839fd6"} Feb 18 00:37:49 crc kubenswrapper[4791]: I0218 00:37:49.081192 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-2gl6m" podStartSLOduration=2.649109926 podStartE2EDuration="51.081175573s" podCreationTimestamp="2026-02-18 00:36:58 +0000 UTC" firstStartedPulling="2026-02-18 00:36:59.261963873 +0000 UTC 
m=+160.829977043" lastFinishedPulling="2026-02-18 00:37:47.69402952 +0000 UTC m=+209.262042690" observedRunningTime="2026-02-18 00:37:48.911813025 +0000 UTC m=+210.479826195" watchObservedRunningTime="2026-02-18 00:37:49.081175573 +0000 UTC m=+210.649188743" Feb 18 00:37:49 crc kubenswrapper[4791]: I0218 00:37:49.331262 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-gxtkn"] Feb 18 00:37:49 crc kubenswrapper[4791]: I0218 00:37:49.886902 4791 generic.go:334] "Generic (PLEG): container finished" podID="719d62e6-1ac9-497f-b889-d2ee84c621d1" containerID="ccd3c0147f54023337d87ccbeb1eac61ea940efcc4cac3bceaeb9c9e46d59968" exitCode=0 Feb 18 00:37:49 crc kubenswrapper[4791]: I0218 00:37:49.886972 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cp28s" event={"ID":"719d62e6-1ac9-497f-b889-d2ee84c621d1","Type":"ContainerDied","Data":"ccd3c0147f54023337d87ccbeb1eac61ea940efcc4cac3bceaeb9c9e46d59968"} Feb 18 00:37:50 crc kubenswrapper[4791]: I0218 00:37:50.185985 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-l5zp8" Feb 18 00:37:50 crc kubenswrapper[4791]: I0218 00:37:50.596080 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-lxgnb" Feb 18 00:37:50 crc kubenswrapper[4791]: I0218 00:37:50.894056 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cp28s" event={"ID":"719d62e6-1ac9-497f-b889-d2ee84c621d1","Type":"ContainerStarted","Data":"a699afb9d72f8df1c7d3f49b4c78af8d27c3b1dd1edf6251fcf5d63cfd973990"} Feb 18 00:37:50 crc kubenswrapper[4791]: I0218 00:37:50.900226 4791 generic.go:334] "Generic (PLEG): container finished" podID="14300052-284f-4b0c-8238-13ea3a9ddb6a" containerID="9053435eba5631c5a190c56d4f5c55bf1e5d7a72c47f8c01ec241a2803eb1863" exitCode=0 Feb 18 00:37:50 crc kubenswrapper[4791]: I0218 00:37:50.900284 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mklvx" event={"ID":"14300052-284f-4b0c-8238-13ea3a9ddb6a","Type":"ContainerDied","Data":"9053435eba5631c5a190c56d4f5c55bf1e5d7a72c47f8c01ec241a2803eb1863"} Feb 18 00:37:50 crc kubenswrapper[4791]: I0218 00:37:50.906745 4791 generic.go:334] "Generic (PLEG): container finished" podID="34fbd2ce-81ba-4a98-ba90-982996dec809" containerID="e8d1254607cb72f5c0d87c67e988c9005f1b42ae09c876d691e73c29df47559d" exitCode=0 Feb 18 00:37:50 crc kubenswrapper[4791]: I0218 00:37:50.906779 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9svms" event={"ID":"34fbd2ce-81ba-4a98-ba90-982996dec809","Type":"ContainerDied","Data":"e8d1254607cb72f5c0d87c67e988c9005f1b42ae09c876d691e73c29df47559d"} Feb 18 00:37:50 crc kubenswrapper[4791]: I0218 00:37:50.918035 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-cp28s" podStartSLOduration=2.918419113 podStartE2EDuration="53.918018427s" podCreationTimestamp="2026-02-18 00:36:57 +0000 UTC" firstStartedPulling="2026-02-18 00:36:59.290991075 +0000 UTC m=+160.859004245" lastFinishedPulling="2026-02-18 00:37:50.290590389 +0000 UTC m=+211.858603559" observedRunningTime="2026-02-18 00:37:50.915180436 +0000 UTC m=+212.483193616" watchObservedRunningTime="2026-02-18 00:37:50.918018427 +0000 UTC m=+212.486031597" Feb 18 00:37:51 crc kubenswrapper[4791]: I0218 
00:37:51.918855 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9svms" event={"ID":"34fbd2ce-81ba-4a98-ba90-982996dec809","Type":"ContainerStarted","Data":"878bc0e2b2f095e508866e65a327e4788f9216f5b725eee7d52a60c6ba66bdc9"} Feb 18 00:37:51 crc kubenswrapper[4791]: I0218 00:37:51.921428 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mklvx" event={"ID":"14300052-284f-4b0c-8238-13ea3a9ddb6a","Type":"ContainerStarted","Data":"24290760148bc7b3981db02d2eb98d5f2e79a9166153a8e4d8de21ec2421cdb9"} Feb 18 00:37:51 crc kubenswrapper[4791]: I0218 00:37:51.923820 4791 generic.go:334] "Generic (PLEG): container finished" podID="edb21ff9-bfba-4ff7-a6df-54d2236a1233" containerID="e3fc07dc9a877c48d298afdecb8de3f2d0d401681f6f748cc55cba0bb52232e6" exitCode=0 Feb 18 00:37:51 crc kubenswrapper[4791]: I0218 00:37:51.923861 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mqrgh" event={"ID":"edb21ff9-bfba-4ff7-a6df-54d2236a1233","Type":"ContainerDied","Data":"e3fc07dc9a877c48d298afdecb8de3f2d0d401681f6f748cc55cba0bb52232e6"} Feb 18 00:37:51 crc kubenswrapper[4791]: I0218 00:37:51.939105 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-9svms" podStartSLOduration=2.662231363 podStartE2EDuration="54.939089636s" podCreationTimestamp="2026-02-18 00:36:57 +0000 UTC" firstStartedPulling="2026-02-18 00:36:59.279967369 +0000 UTC m=+160.847980549" lastFinishedPulling="2026-02-18 00:37:51.556825652 +0000 UTC m=+213.124838822" observedRunningTime="2026-02-18 00:37:51.936056178 +0000 UTC m=+213.504069348" watchObservedRunningTime="2026-02-18 00:37:51.939089636 +0000 UTC m=+213.507102806" Feb 18 00:37:51 crc kubenswrapper[4791]: I0218 00:37:51.970788 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-mklvx" podStartSLOduration=2.910177783 podStartE2EDuration="54.970768465s" podCreationTimestamp="2026-02-18 00:36:57 +0000 UTC" firstStartedPulling="2026-02-18 00:36:59.264719744 +0000 UTC m=+160.832732924" lastFinishedPulling="2026-02-18 00:37:51.325310436 +0000 UTC m=+212.893323606" observedRunningTime="2026-02-18 00:37:51.96688921 +0000 UTC m=+213.534902390" watchObservedRunningTime="2026-02-18 00:37:51.970768465 +0000 UTC m=+213.538781635" Feb 18 00:37:52 crc kubenswrapper[4791]: I0218 00:37:52.929948 4791 generic.go:334] "Generic (PLEG): container finished" podID="5a20a60a-9750-49bc-82b3-5d10ea3fc7f5" containerID="d35a3233c430af6657e8a8a19bbcf2b019cb94561511a6cd6b8d2788c4c4afd5" exitCode=0 Feb 18 00:37:52 crc kubenswrapper[4791]: I0218 00:37:52.930024 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l97cx" event={"ID":"5a20a60a-9750-49bc-82b3-5d10ea3fc7f5","Type":"ContainerDied","Data":"d35a3233c430af6657e8a8a19bbcf2b019cb94561511a6cd6b8d2788c4c4afd5"} Feb 18 00:37:52 crc kubenswrapper[4791]: I0218 00:37:52.932580 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mqrgh" event={"ID":"edb21ff9-bfba-4ff7-a6df-54d2236a1233","Type":"ContainerStarted","Data":"eb4ec9c11186c4bab8b39d6f58905a349e54d99a639b60e710e600b0fc4975b4"} Feb 18 00:37:52 crc kubenswrapper[4791]: I0218 00:37:52.969477 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-mqrgh" podStartSLOduration=3.103422029 
podStartE2EDuration="52.969457243s" podCreationTimestamp="2026-02-18 00:37:00 +0000 UTC" firstStartedPulling="2026-02-18 00:37:02.4545138 +0000 UTC m=+164.022526970" lastFinishedPulling="2026-02-18 00:37:52.320549014 +0000 UTC m=+213.888562184" observedRunningTime="2026-02-18 00:37:52.967100878 +0000 UTC m=+214.535114048" watchObservedRunningTime="2026-02-18 00:37:52.969457243 +0000 UTC m=+214.537470413" Feb 18 00:37:53 crc kubenswrapper[4791]: I0218 00:37:53.303672 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lxgnb"] Feb 18 00:37:53 crc kubenswrapper[4791]: I0218 00:37:53.303883 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-lxgnb" podUID="6f5a9f53-3f5e-44d5-b322-fb0910613ad8" containerName="registry-server" containerID="cri-o://ec7684d880b49b16be81a413a89153f8d5cac1578358f6da6301574df356c664" gracePeriod=2 Feb 18 00:37:53 crc kubenswrapper[4791]: I0218 00:37:53.700826 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lxgnb" Feb 18 00:37:53 crc kubenswrapper[4791]: I0218 00:37:53.820634 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f5a9f53-3f5e-44d5-b322-fb0910613ad8-catalog-content\") pod \"6f5a9f53-3f5e-44d5-b322-fb0910613ad8\" (UID: \"6f5a9f53-3f5e-44d5-b322-fb0910613ad8\") " Feb 18 00:37:53 crc kubenswrapper[4791]: I0218 00:37:53.820705 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6rxd5\" (UniqueName: \"kubernetes.io/projected/6f5a9f53-3f5e-44d5-b322-fb0910613ad8-kube-api-access-6rxd5\") pod \"6f5a9f53-3f5e-44d5-b322-fb0910613ad8\" (UID: \"6f5a9f53-3f5e-44d5-b322-fb0910613ad8\") " Feb 18 00:37:53 crc kubenswrapper[4791]: I0218 00:37:53.820771 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f5a9f53-3f5e-44d5-b322-fb0910613ad8-utilities\") pod \"6f5a9f53-3f5e-44d5-b322-fb0910613ad8\" (UID: \"6f5a9f53-3f5e-44d5-b322-fb0910613ad8\") " Feb 18 00:37:53 crc kubenswrapper[4791]: I0218 00:37:53.821759 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6f5a9f53-3f5e-44d5-b322-fb0910613ad8-utilities" (OuterVolumeSpecName: "utilities") pod "6f5a9f53-3f5e-44d5-b322-fb0910613ad8" (UID: "6f5a9f53-3f5e-44d5-b322-fb0910613ad8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:37:53 crc kubenswrapper[4791]: I0218 00:37:53.822916 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f5a9f53-3f5e-44d5-b322-fb0910613ad8-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 00:37:53 crc kubenswrapper[4791]: I0218 00:37:53.826736 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f5a9f53-3f5e-44d5-b322-fb0910613ad8-kube-api-access-6rxd5" (OuterVolumeSpecName: "kube-api-access-6rxd5") pod "6f5a9f53-3f5e-44d5-b322-fb0910613ad8" (UID: "6f5a9f53-3f5e-44d5-b322-fb0910613ad8"). InnerVolumeSpecName "kube-api-access-6rxd5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:37:53 crc kubenswrapper[4791]: I0218 00:37:53.843092 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6f5a9f53-3f5e-44d5-b322-fb0910613ad8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6f5a9f53-3f5e-44d5-b322-fb0910613ad8" (UID: "6f5a9f53-3f5e-44d5-b322-fb0910613ad8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:37:53 crc kubenswrapper[4791]: I0218 00:37:53.924292 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f5a9f53-3f5e-44d5-b322-fb0910613ad8-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 00:37:53 crc kubenswrapper[4791]: I0218 00:37:53.924602 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6rxd5\" (UniqueName: \"kubernetes.io/projected/6f5a9f53-3f5e-44d5-b322-fb0910613ad8-kube-api-access-6rxd5\") on node \"crc\" DevicePath \"\"" Feb 18 00:37:53 crc kubenswrapper[4791]: I0218 00:37:53.952127 4791 generic.go:334] "Generic (PLEG): container finished" podID="6f5a9f53-3f5e-44d5-b322-fb0910613ad8" containerID="ec7684d880b49b16be81a413a89153f8d5cac1578358f6da6301574df356c664" exitCode=0 Feb 18 00:37:53 crc kubenswrapper[4791]: I0218 00:37:53.952230 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lxgnb" Feb 18 00:37:53 crc kubenswrapper[4791]: I0218 00:37:53.952278 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lxgnb" event={"ID":"6f5a9f53-3f5e-44d5-b322-fb0910613ad8","Type":"ContainerDied","Data":"ec7684d880b49b16be81a413a89153f8d5cac1578358f6da6301574df356c664"} Feb 18 00:37:53 crc kubenswrapper[4791]: I0218 00:37:53.952313 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lxgnb" event={"ID":"6f5a9f53-3f5e-44d5-b322-fb0910613ad8","Type":"ContainerDied","Data":"da2b20ad41a5e64b8ec03b93c84692003a418a50af9fe50e2b85083e417ad9db"} Feb 18 00:37:53 crc kubenswrapper[4791]: I0218 00:37:53.952333 4791 scope.go:117] "RemoveContainer" containerID="ec7684d880b49b16be81a413a89153f8d5cac1578358f6da6301574df356c664" Feb 18 00:37:53 crc kubenswrapper[4791]: I0218 00:37:53.955377 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l97cx" event={"ID":"5a20a60a-9750-49bc-82b3-5d10ea3fc7f5","Type":"ContainerStarted","Data":"4b7cd35894336565c619ea0d8bd823da3b3fb59c439444d4e97a2d27c3a2bf3b"} Feb 18 00:37:53 crc kubenswrapper[4791]: I0218 00:37:53.970175 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-l97cx" podStartSLOduration=2.064904965 podStartE2EDuration="52.970144747s" podCreationTimestamp="2026-02-18 00:37:01 +0000 UTC" firstStartedPulling="2026-02-18 00:37:02.448540762 +0000 UTC m=+164.016553932" lastFinishedPulling="2026-02-18 00:37:53.353780544 +0000 UTC m=+214.921793714" observedRunningTime="2026-02-18 00:37:53.970058194 +0000 UTC m=+215.538071374" watchObservedRunningTime="2026-02-18 00:37:53.970144747 +0000 UTC m=+215.538157917" Feb 18 00:37:53 crc kubenswrapper[4791]: I0218 00:37:53.972594 4791 scope.go:117] "RemoveContainer" containerID="887e1b0a08c4f0089d48eee8713c2ccd0207f520054d2eb7749bc050e59858fd" Feb 18 00:37:53 crc kubenswrapper[4791]: I0218 00:37:53.990137 4791 kubelet.go:2437] "SyncLoop 
DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lxgnb"] Feb 18 00:37:53 crc kubenswrapper[4791]: I0218 00:37:53.993501 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-lxgnb"] Feb 18 00:37:54 crc kubenswrapper[4791]: I0218 00:37:54.007017 4791 scope.go:117] "RemoveContainer" containerID="ed13f1e785f289b9ba942f72a321d1b6728673eaa1cdf1576d728734d1b643fd" Feb 18 00:37:54 crc kubenswrapper[4791]: I0218 00:37:54.019800 4791 scope.go:117] "RemoveContainer" containerID="ec7684d880b49b16be81a413a89153f8d5cac1578358f6da6301574df356c664" Feb 18 00:37:54 crc kubenswrapper[4791]: E0218 00:37:54.020173 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ec7684d880b49b16be81a413a89153f8d5cac1578358f6da6301574df356c664\": container with ID starting with ec7684d880b49b16be81a413a89153f8d5cac1578358f6da6301574df356c664 not found: ID does not exist" containerID="ec7684d880b49b16be81a413a89153f8d5cac1578358f6da6301574df356c664" Feb 18 00:37:54 crc kubenswrapper[4791]: I0218 00:37:54.020282 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ec7684d880b49b16be81a413a89153f8d5cac1578358f6da6301574df356c664"} err="failed to get container status \"ec7684d880b49b16be81a413a89153f8d5cac1578358f6da6301574df356c664\": rpc error: code = NotFound desc = could not find container \"ec7684d880b49b16be81a413a89153f8d5cac1578358f6da6301574df356c664\": container with ID starting with ec7684d880b49b16be81a413a89153f8d5cac1578358f6da6301574df356c664 not found: ID does not exist" Feb 18 00:37:54 crc kubenswrapper[4791]: I0218 00:37:54.020379 4791 scope.go:117] "RemoveContainer" containerID="887e1b0a08c4f0089d48eee8713c2ccd0207f520054d2eb7749bc050e59858fd" Feb 18 00:37:54 crc kubenswrapper[4791]: E0218 00:37:54.020864 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"887e1b0a08c4f0089d48eee8713c2ccd0207f520054d2eb7749bc050e59858fd\": container with ID starting with 887e1b0a08c4f0089d48eee8713c2ccd0207f520054d2eb7749bc050e59858fd not found: ID does not exist" containerID="887e1b0a08c4f0089d48eee8713c2ccd0207f520054d2eb7749bc050e59858fd" Feb 18 00:37:54 crc kubenswrapper[4791]: I0218 00:37:54.020901 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"887e1b0a08c4f0089d48eee8713c2ccd0207f520054d2eb7749bc050e59858fd"} err="failed to get container status \"887e1b0a08c4f0089d48eee8713c2ccd0207f520054d2eb7749bc050e59858fd\": rpc error: code = NotFound desc = could not find container \"887e1b0a08c4f0089d48eee8713c2ccd0207f520054d2eb7749bc050e59858fd\": container with ID starting with 887e1b0a08c4f0089d48eee8713c2ccd0207f520054d2eb7749bc050e59858fd not found: ID does not exist" Feb 18 00:37:54 crc kubenswrapper[4791]: I0218 00:37:54.020932 4791 scope.go:117] "RemoveContainer" containerID="ed13f1e785f289b9ba942f72a321d1b6728673eaa1cdf1576d728734d1b643fd" Feb 18 00:37:54 crc kubenswrapper[4791]: E0218 00:37:54.021251 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ed13f1e785f289b9ba942f72a321d1b6728673eaa1cdf1576d728734d1b643fd\": container with ID starting with ed13f1e785f289b9ba942f72a321d1b6728673eaa1cdf1576d728734d1b643fd not found: ID does not exist" containerID="ed13f1e785f289b9ba942f72a321d1b6728673eaa1cdf1576d728734d1b643fd" Feb 18 
00:37:54 crc kubenswrapper[4791]: I0218 00:37:54.021270 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed13f1e785f289b9ba942f72a321d1b6728673eaa1cdf1576d728734d1b643fd"} err="failed to get container status \"ed13f1e785f289b9ba942f72a321d1b6728673eaa1cdf1576d728734d1b643fd\": rpc error: code = NotFound desc = could not find container \"ed13f1e785f289b9ba942f72a321d1b6728673eaa1cdf1576d728734d1b643fd\": container with ID starting with ed13f1e785f289b9ba942f72a321d1b6728673eaa1cdf1576d728734d1b643fd not found: ID does not exist" Feb 18 00:37:55 crc kubenswrapper[4791]: I0218 00:37:55.066801 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f5a9f53-3f5e-44d5-b322-fb0910613ad8" path="/var/lib/kubelet/pods/6f5a9f53-3f5e-44d5-b322-fb0910613ad8/volumes" Feb 18 00:37:56 crc kubenswrapper[4791]: I0218 00:37:56.800103 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 00:37:56 crc kubenswrapper[4791]: I0218 00:37:56.800453 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 00:37:56 crc kubenswrapper[4791]: I0218 00:37:56.800496 4791 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" Feb 18 00:37:56 crc kubenswrapper[4791]: I0218 00:37:56.801089 4791 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107"} pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 18 00:37:56 crc kubenswrapper[4791]: I0218 00:37:56.801144 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" containerID="cri-o://cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107" gracePeriod=600 Feb 18 00:37:57 crc kubenswrapper[4791]: I0218 00:37:57.928806 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-cp28s" Feb 18 00:37:57 crc kubenswrapper[4791]: I0218 00:37:57.928859 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-cp28s" Feb 18 00:37:57 crc kubenswrapper[4791]: I0218 00:37:57.966260 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-cp28s" Feb 18 00:37:57 crc kubenswrapper[4791]: I0218 00:37:57.977429 4791 generic.go:334] "Generic (PLEG): container finished" podID="0b31a333-8f95-459c-8135-e91e557c4c85" containerID="cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107" exitCode=0 Feb 18 00:37:57 crc kubenswrapper[4791]: I0218 00:37:57.977494 4791 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerDied","Data":"cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107"} Feb 18 00:37:58 crc kubenswrapper[4791]: I0218 00:37:58.011022 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-cp28s" Feb 18 00:37:58 crc kubenswrapper[4791]: I0218 00:37:58.120542 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-mklvx" Feb 18 00:37:58 crc kubenswrapper[4791]: I0218 00:37:58.121139 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-mklvx" Feb 18 00:37:58 crc kubenswrapper[4791]: I0218 00:37:58.171337 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-mklvx" Feb 18 00:37:58 crc kubenswrapper[4791]: I0218 00:37:58.347714 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-9svms" Feb 18 00:37:58 crc kubenswrapper[4791]: I0218 00:37:58.347805 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-9svms" Feb 18 00:37:58 crc kubenswrapper[4791]: I0218 00:37:58.388801 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-9svms" Feb 18 00:37:58 crc kubenswrapper[4791]: I0218 00:37:58.551135 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-2gl6m" Feb 18 00:37:58 crc kubenswrapper[4791]: I0218 00:37:58.551194 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-2gl6m" Feb 18 00:37:58 crc kubenswrapper[4791]: I0218 00:37:58.604277 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-2gl6m" Feb 18 00:37:58 crc kubenswrapper[4791]: I0218 00:37:58.984660 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerStarted","Data":"6355d2e0663f057b89b7f75a0d7f3be1bf01198676dbca6743c4cddbb8aa160f"} Feb 18 00:37:59 crc kubenswrapper[4791]: I0218 00:37:59.022276 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-mklvx" Feb 18 00:37:59 crc kubenswrapper[4791]: I0218 00:37:59.024359 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-9svms" Feb 18 00:37:59 crc kubenswrapper[4791]: I0218 00:37:59.026527 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-2gl6m" Feb 18 00:38:00 crc kubenswrapper[4791]: I0218 00:38:00.303860 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9svms"] Feb 18 00:38:00 crc kubenswrapper[4791]: I0218 00:38:00.504784 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2gl6m"] Feb 18 00:38:00 crc kubenswrapper[4791]: I0218 00:38:00.993223 4791 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-marketplace/certified-operators-9svms" podUID="34fbd2ce-81ba-4a98-ba90-982996dec809" containerName="registry-server" containerID="cri-o://878bc0e2b2f095e508866e65a327e4788f9216f5b725eee7d52a60c6ba66bdc9" gracePeriod=2 Feb 18 00:38:00 crc kubenswrapper[4791]: I0218 00:38:00.993419 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-2gl6m" podUID="12887847-f4a6-4fed-b6b5-e30a9c494948" containerName="registry-server" containerID="cri-o://17b3e17fce331a11c40c6b6bce04891676fc9a31922da50eaaad1525b9839fd6" gracePeriod=2 Feb 18 00:38:01 crc kubenswrapper[4791]: I0218 00:38:01.151099 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-mqrgh" Feb 18 00:38:01 crc kubenswrapper[4791]: I0218 00:38:01.151429 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-mqrgh" Feb 18 00:38:01 crc kubenswrapper[4791]: I0218 00:38:01.195406 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-mqrgh" Feb 18 00:38:01 crc kubenswrapper[4791]: I0218 00:38:01.515219 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2gl6m" Feb 18 00:38:01 crc kubenswrapper[4791]: I0218 00:38:01.584831 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-l97cx" Feb 18 00:38:01 crc kubenswrapper[4791]: I0218 00:38:01.584866 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-l97cx" Feb 18 00:38:01 crc kubenswrapper[4791]: I0218 00:38:01.585687 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9svms" Feb 18 00:38:01 crc kubenswrapper[4791]: I0218 00:38:01.621194 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12887847-f4a6-4fed-b6b5-e30a9c494948-utilities\") pod \"12887847-f4a6-4fed-b6b5-e30a9c494948\" (UID: \"12887847-f4a6-4fed-b6b5-e30a9c494948\") " Feb 18 00:38:01 crc kubenswrapper[4791]: I0218 00:38:01.621291 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12887847-f4a6-4fed-b6b5-e30a9c494948-catalog-content\") pod \"12887847-f4a6-4fed-b6b5-e30a9c494948\" (UID: \"12887847-f4a6-4fed-b6b5-e30a9c494948\") " Feb 18 00:38:01 crc kubenswrapper[4791]: I0218 00:38:01.621346 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2fvnj\" (UniqueName: \"kubernetes.io/projected/12887847-f4a6-4fed-b6b5-e30a9c494948-kube-api-access-2fvnj\") pod \"12887847-f4a6-4fed-b6b5-e30a9c494948\" (UID: \"12887847-f4a6-4fed-b6b5-e30a9c494948\") " Feb 18 00:38:01 crc kubenswrapper[4791]: I0218 00:38:01.622656 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/12887847-f4a6-4fed-b6b5-e30a9c494948-utilities" (OuterVolumeSpecName: "utilities") pod "12887847-f4a6-4fed-b6b5-e30a9c494948" (UID: "12887847-f4a6-4fed-b6b5-e30a9c494948"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:38:01 crc kubenswrapper[4791]: I0218 00:38:01.629346 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12887847-f4a6-4fed-b6b5-e30a9c494948-kube-api-access-2fvnj" (OuterVolumeSpecName: "kube-api-access-2fvnj") pod "12887847-f4a6-4fed-b6b5-e30a9c494948" (UID: "12887847-f4a6-4fed-b6b5-e30a9c494948"). InnerVolumeSpecName "kube-api-access-2fvnj". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:38:01 crc kubenswrapper[4791]: I0218 00:38:01.639858 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-l97cx" Feb 18 00:38:01 crc kubenswrapper[4791]: I0218 00:38:01.693365 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/12887847-f4a6-4fed-b6b5-e30a9c494948-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "12887847-f4a6-4fed-b6b5-e30a9c494948" (UID: "12887847-f4a6-4fed-b6b5-e30a9c494948"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:38:01 crc kubenswrapper[4791]: I0218 00:38:01.722647 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hdf7f\" (UniqueName: \"kubernetes.io/projected/34fbd2ce-81ba-4a98-ba90-982996dec809-kube-api-access-hdf7f\") pod \"34fbd2ce-81ba-4a98-ba90-982996dec809\" (UID: \"34fbd2ce-81ba-4a98-ba90-982996dec809\") " Feb 18 00:38:01 crc kubenswrapper[4791]: I0218 00:38:01.722700 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/34fbd2ce-81ba-4a98-ba90-982996dec809-utilities\") pod \"34fbd2ce-81ba-4a98-ba90-982996dec809\" (UID: \"34fbd2ce-81ba-4a98-ba90-982996dec809\") " Feb 18 00:38:01 crc kubenswrapper[4791]: I0218 00:38:01.722808 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/34fbd2ce-81ba-4a98-ba90-982996dec809-catalog-content\") pod \"34fbd2ce-81ba-4a98-ba90-982996dec809\" (UID: \"34fbd2ce-81ba-4a98-ba90-982996dec809\") " Feb 18 00:38:01 crc kubenswrapper[4791]: I0218 00:38:01.723095 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2fvnj\" (UniqueName: \"kubernetes.io/projected/12887847-f4a6-4fed-b6b5-e30a9c494948-kube-api-access-2fvnj\") on node \"crc\" DevicePath \"\"" Feb 18 00:38:01 crc kubenswrapper[4791]: I0218 00:38:01.723115 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12887847-f4a6-4fed-b6b5-e30a9c494948-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 00:38:01 crc kubenswrapper[4791]: I0218 00:38:01.723126 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12887847-f4a6-4fed-b6b5-e30a9c494948-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 00:38:01 crc kubenswrapper[4791]: I0218 00:38:01.723641 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/34fbd2ce-81ba-4a98-ba90-982996dec809-utilities" (OuterVolumeSpecName: "utilities") pod "34fbd2ce-81ba-4a98-ba90-982996dec809" (UID: "34fbd2ce-81ba-4a98-ba90-982996dec809"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:38:01 crc kubenswrapper[4791]: I0218 00:38:01.726418 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34fbd2ce-81ba-4a98-ba90-982996dec809-kube-api-access-hdf7f" (OuterVolumeSpecName: "kube-api-access-hdf7f") pod "34fbd2ce-81ba-4a98-ba90-982996dec809" (UID: "34fbd2ce-81ba-4a98-ba90-982996dec809"). InnerVolumeSpecName "kube-api-access-hdf7f". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:38:01 crc kubenswrapper[4791]: I0218 00:38:01.824371 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hdf7f\" (UniqueName: \"kubernetes.io/projected/34fbd2ce-81ba-4a98-ba90-982996dec809-kube-api-access-hdf7f\") on node \"crc\" DevicePath \"\"" Feb 18 00:38:01 crc kubenswrapper[4791]: I0218 00:38:01.824402 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/34fbd2ce-81ba-4a98-ba90-982996dec809-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 00:38:01 crc kubenswrapper[4791]: I0218 00:38:01.955992 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/34fbd2ce-81ba-4a98-ba90-982996dec809-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "34fbd2ce-81ba-4a98-ba90-982996dec809" (UID: "34fbd2ce-81ba-4a98-ba90-982996dec809"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:38:01 crc kubenswrapper[4791]: I0218 00:38:01.999450 4791 generic.go:334] "Generic (PLEG): container finished" podID="12887847-f4a6-4fed-b6b5-e30a9c494948" containerID="17b3e17fce331a11c40c6b6bce04891676fc9a31922da50eaaad1525b9839fd6" exitCode=0 Feb 18 00:38:01 crc kubenswrapper[4791]: I0218 00:38:01.999545 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-2gl6m" Feb 18 00:38:02 crc kubenswrapper[4791]: I0218 00:38:01.999520 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2gl6m" event={"ID":"12887847-f4a6-4fed-b6b5-e30a9c494948","Type":"ContainerDied","Data":"17b3e17fce331a11c40c6b6bce04891676fc9a31922da50eaaad1525b9839fd6"} Feb 18 00:38:02 crc kubenswrapper[4791]: I0218 00:38:01.999791 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2gl6m" event={"ID":"12887847-f4a6-4fed-b6b5-e30a9c494948","Type":"ContainerDied","Data":"fc4fdb72a058c92a131a1aef7a68aaacc6033a912e103a1d2a5305df88b4485e"} Feb 18 00:38:02 crc kubenswrapper[4791]: I0218 00:38:01.999827 4791 scope.go:117] "RemoveContainer" containerID="17b3e17fce331a11c40c6b6bce04891676fc9a31922da50eaaad1525b9839fd6" Feb 18 00:38:02 crc kubenswrapper[4791]: I0218 00:38:02.001686 4791 generic.go:334] "Generic (PLEG): container finished" podID="34fbd2ce-81ba-4a98-ba90-982996dec809" containerID="878bc0e2b2f095e508866e65a327e4788f9216f5b725eee7d52a60c6ba66bdc9" exitCode=0 Feb 18 00:38:02 crc kubenswrapper[4791]: I0218 00:38:02.001726 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9svms" event={"ID":"34fbd2ce-81ba-4a98-ba90-982996dec809","Type":"ContainerDied","Data":"878bc0e2b2f095e508866e65a327e4788f9216f5b725eee7d52a60c6ba66bdc9"} Feb 18 00:38:02 crc kubenswrapper[4791]: I0218 00:38:02.001774 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9svms" event={"ID":"34fbd2ce-81ba-4a98-ba90-982996dec809","Type":"ContainerDied","Data":"30a37bea908c9d8cd2a06f4813f0318b847ca5b724233b4f7a64ef3970dde163"} Feb 18 00:38:02 crc kubenswrapper[4791]: I0218 00:38:02.001776 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-9svms" Feb 18 00:38:02 crc kubenswrapper[4791]: I0218 00:38:02.022657 4791 scope.go:117] "RemoveContainer" containerID="734590c8dc13d6754d66cd588a67f43d2aec6f2565f6ca489831e5f87c317494" Feb 18 00:38:02 crc kubenswrapper[4791]: I0218 00:38:02.025830 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/34fbd2ce-81ba-4a98-ba90-982996dec809-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 00:38:02 crc kubenswrapper[4791]: I0218 00:38:02.030347 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2gl6m"] Feb 18 00:38:02 crc kubenswrapper[4791]: I0218 00:38:02.036298 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-2gl6m"] Feb 18 00:38:02 crc kubenswrapper[4791]: I0218 00:38:02.041743 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-mqrgh" Feb 18 00:38:02 crc kubenswrapper[4791]: I0218 00:38:02.046977 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9svms"] Feb 18 00:38:02 crc kubenswrapper[4791]: I0218 00:38:02.056331 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-9svms"] Feb 18 00:38:02 crc kubenswrapper[4791]: I0218 00:38:02.058342 4791 scope.go:117] "RemoveContainer" containerID="240808ad8b2073bee68a6b194b2c0e5ef7cd12030fddfc7400519c0d9ebec227" Feb 18 00:38:02 crc kubenswrapper[4791]: I0218 00:38:02.071061 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-l97cx" Feb 18 00:38:02 crc kubenswrapper[4791]: I0218 00:38:02.073094 4791 scope.go:117] "RemoveContainer" containerID="17b3e17fce331a11c40c6b6bce04891676fc9a31922da50eaaad1525b9839fd6" Feb 18 00:38:02 crc kubenswrapper[4791]: E0218 00:38:02.075408 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"17b3e17fce331a11c40c6b6bce04891676fc9a31922da50eaaad1525b9839fd6\": container with ID starting with 17b3e17fce331a11c40c6b6bce04891676fc9a31922da50eaaad1525b9839fd6 not found: ID does not exist" containerID="17b3e17fce331a11c40c6b6bce04891676fc9a31922da50eaaad1525b9839fd6" Feb 18 00:38:02 crc kubenswrapper[4791]: I0218 00:38:02.075453 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"17b3e17fce331a11c40c6b6bce04891676fc9a31922da50eaaad1525b9839fd6"} err="failed to get container status \"17b3e17fce331a11c40c6b6bce04891676fc9a31922da50eaaad1525b9839fd6\": rpc error: code = NotFound desc = could not find container \"17b3e17fce331a11c40c6b6bce04891676fc9a31922da50eaaad1525b9839fd6\": container with ID starting with 17b3e17fce331a11c40c6b6bce04891676fc9a31922da50eaaad1525b9839fd6 not found: ID does not exist" Feb 18 00:38:02 crc kubenswrapper[4791]: I0218 00:38:02.075474 4791 scope.go:117] "RemoveContainer" containerID="734590c8dc13d6754d66cd588a67f43d2aec6f2565f6ca489831e5f87c317494" Feb 18 00:38:02 crc kubenswrapper[4791]: E0218 00:38:02.077699 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"734590c8dc13d6754d66cd588a67f43d2aec6f2565f6ca489831e5f87c317494\": container with ID starting with 734590c8dc13d6754d66cd588a67f43d2aec6f2565f6ca489831e5f87c317494 not 
found: ID does not exist" containerID="734590c8dc13d6754d66cd588a67f43d2aec6f2565f6ca489831e5f87c317494" Feb 18 00:38:02 crc kubenswrapper[4791]: I0218 00:38:02.077728 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"734590c8dc13d6754d66cd588a67f43d2aec6f2565f6ca489831e5f87c317494"} err="failed to get container status \"734590c8dc13d6754d66cd588a67f43d2aec6f2565f6ca489831e5f87c317494\": rpc error: code = NotFound desc = could not find container \"734590c8dc13d6754d66cd588a67f43d2aec6f2565f6ca489831e5f87c317494\": container with ID starting with 734590c8dc13d6754d66cd588a67f43d2aec6f2565f6ca489831e5f87c317494 not found: ID does not exist" Feb 18 00:38:02 crc kubenswrapper[4791]: I0218 00:38:02.077748 4791 scope.go:117] "RemoveContainer" containerID="240808ad8b2073bee68a6b194b2c0e5ef7cd12030fddfc7400519c0d9ebec227" Feb 18 00:38:02 crc kubenswrapper[4791]: E0218 00:38:02.078242 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"240808ad8b2073bee68a6b194b2c0e5ef7cd12030fddfc7400519c0d9ebec227\": container with ID starting with 240808ad8b2073bee68a6b194b2c0e5ef7cd12030fddfc7400519c0d9ebec227 not found: ID does not exist" containerID="240808ad8b2073bee68a6b194b2c0e5ef7cd12030fddfc7400519c0d9ebec227" Feb 18 00:38:02 crc kubenswrapper[4791]: I0218 00:38:02.078290 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"240808ad8b2073bee68a6b194b2c0e5ef7cd12030fddfc7400519c0d9ebec227"} err="failed to get container status \"240808ad8b2073bee68a6b194b2c0e5ef7cd12030fddfc7400519c0d9ebec227\": rpc error: code = NotFound desc = could not find container \"240808ad8b2073bee68a6b194b2c0e5ef7cd12030fddfc7400519c0d9ebec227\": container with ID starting with 240808ad8b2073bee68a6b194b2c0e5ef7cd12030fddfc7400519c0d9ebec227 not found: ID does not exist" Feb 18 00:38:02 crc kubenswrapper[4791]: I0218 00:38:02.078325 4791 scope.go:117] "RemoveContainer" containerID="878bc0e2b2f095e508866e65a327e4788f9216f5b725eee7d52a60c6ba66bdc9" Feb 18 00:38:02 crc kubenswrapper[4791]: I0218 00:38:02.091383 4791 scope.go:117] "RemoveContainer" containerID="e8d1254607cb72f5c0d87c67e988c9005f1b42ae09c876d691e73c29df47559d" Feb 18 00:38:02 crc kubenswrapper[4791]: I0218 00:38:02.126545 4791 scope.go:117] "RemoveContainer" containerID="b88a036cb4494abc0c0913d0c046ee921e075d7b6148bafb48db84af26489b37" Feb 18 00:38:02 crc kubenswrapper[4791]: I0218 00:38:02.154576 4791 scope.go:117] "RemoveContainer" containerID="878bc0e2b2f095e508866e65a327e4788f9216f5b725eee7d52a60c6ba66bdc9" Feb 18 00:38:02 crc kubenswrapper[4791]: E0218 00:38:02.154962 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"878bc0e2b2f095e508866e65a327e4788f9216f5b725eee7d52a60c6ba66bdc9\": container with ID starting with 878bc0e2b2f095e508866e65a327e4788f9216f5b725eee7d52a60c6ba66bdc9 not found: ID does not exist" containerID="878bc0e2b2f095e508866e65a327e4788f9216f5b725eee7d52a60c6ba66bdc9" Feb 18 00:38:02 crc kubenswrapper[4791]: I0218 00:38:02.155009 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"878bc0e2b2f095e508866e65a327e4788f9216f5b725eee7d52a60c6ba66bdc9"} err="failed to get container status \"878bc0e2b2f095e508866e65a327e4788f9216f5b725eee7d52a60c6ba66bdc9\": rpc error: code = NotFound desc = could not find container 
\"878bc0e2b2f095e508866e65a327e4788f9216f5b725eee7d52a60c6ba66bdc9\": container with ID starting with 878bc0e2b2f095e508866e65a327e4788f9216f5b725eee7d52a60c6ba66bdc9 not found: ID does not exist" Feb 18 00:38:02 crc kubenswrapper[4791]: I0218 00:38:02.155041 4791 scope.go:117] "RemoveContainer" containerID="e8d1254607cb72f5c0d87c67e988c9005f1b42ae09c876d691e73c29df47559d" Feb 18 00:38:02 crc kubenswrapper[4791]: E0218 00:38:02.155323 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e8d1254607cb72f5c0d87c67e988c9005f1b42ae09c876d691e73c29df47559d\": container with ID starting with e8d1254607cb72f5c0d87c67e988c9005f1b42ae09c876d691e73c29df47559d not found: ID does not exist" containerID="e8d1254607cb72f5c0d87c67e988c9005f1b42ae09c876d691e73c29df47559d" Feb 18 00:38:02 crc kubenswrapper[4791]: I0218 00:38:02.155356 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e8d1254607cb72f5c0d87c67e988c9005f1b42ae09c876d691e73c29df47559d"} err="failed to get container status \"e8d1254607cb72f5c0d87c67e988c9005f1b42ae09c876d691e73c29df47559d\": rpc error: code = NotFound desc = could not find container \"e8d1254607cb72f5c0d87c67e988c9005f1b42ae09c876d691e73c29df47559d\": container with ID starting with e8d1254607cb72f5c0d87c67e988c9005f1b42ae09c876d691e73c29df47559d not found: ID does not exist" Feb 18 00:38:02 crc kubenswrapper[4791]: I0218 00:38:02.155377 4791 scope.go:117] "RemoveContainer" containerID="b88a036cb4494abc0c0913d0c046ee921e075d7b6148bafb48db84af26489b37" Feb 18 00:38:02 crc kubenswrapper[4791]: E0218 00:38:02.155540 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b88a036cb4494abc0c0913d0c046ee921e075d7b6148bafb48db84af26489b37\": container with ID starting with b88a036cb4494abc0c0913d0c046ee921e075d7b6148bafb48db84af26489b37 not found: ID does not exist" containerID="b88a036cb4494abc0c0913d0c046ee921e075d7b6148bafb48db84af26489b37" Feb 18 00:38:02 crc kubenswrapper[4791]: I0218 00:38:02.155561 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b88a036cb4494abc0c0913d0c046ee921e075d7b6148bafb48db84af26489b37"} err="failed to get container status \"b88a036cb4494abc0c0913d0c046ee921e075d7b6148bafb48db84af26489b37\": rpc error: code = NotFound desc = could not find container \"b88a036cb4494abc0c0913d0c046ee921e075d7b6148bafb48db84af26489b37\": container with ID starting with b88a036cb4494abc0c0913d0c046ee921e075d7b6148bafb48db84af26489b37 not found: ID does not exist" Feb 18 00:38:03 crc kubenswrapper[4791]: I0218 00:38:03.069500 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="12887847-f4a6-4fed-b6b5-e30a9c494948" path="/var/lib/kubelet/pods/12887847-f4a6-4fed-b6b5-e30a9c494948/volumes" Feb 18 00:38:03 crc kubenswrapper[4791]: I0218 00:38:03.070081 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="34fbd2ce-81ba-4a98-ba90-982996dec809" path="/var/lib/kubelet/pods/34fbd2ce-81ba-4a98-ba90-982996dec809/volumes" Feb 18 00:38:04 crc kubenswrapper[4791]: I0218 00:38:04.911418 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-l97cx"] Feb 18 00:38:04 crc kubenswrapper[4791]: I0218 00:38:04.912198 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-l97cx" 
podUID="5a20a60a-9750-49bc-82b3-5d10ea3fc7f5" containerName="registry-server" containerID="cri-o://4b7cd35894336565c619ea0d8bd823da3b3fb59c439444d4e97a2d27c3a2bf3b" gracePeriod=2 Feb 18 00:38:05 crc kubenswrapper[4791]: I0218 00:38:05.332468 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-l97cx" Feb 18 00:38:05 crc kubenswrapper[4791]: I0218 00:38:05.472543 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a20a60a-9750-49bc-82b3-5d10ea3fc7f5-utilities\") pod \"5a20a60a-9750-49bc-82b3-5d10ea3fc7f5\" (UID: \"5a20a60a-9750-49bc-82b3-5d10ea3fc7f5\") " Feb 18 00:38:05 crc kubenswrapper[4791]: I0218 00:38:05.472687 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mpl25\" (UniqueName: \"kubernetes.io/projected/5a20a60a-9750-49bc-82b3-5d10ea3fc7f5-kube-api-access-mpl25\") pod \"5a20a60a-9750-49bc-82b3-5d10ea3fc7f5\" (UID: \"5a20a60a-9750-49bc-82b3-5d10ea3fc7f5\") " Feb 18 00:38:05 crc kubenswrapper[4791]: I0218 00:38:05.472713 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a20a60a-9750-49bc-82b3-5d10ea3fc7f5-catalog-content\") pod \"5a20a60a-9750-49bc-82b3-5d10ea3fc7f5\" (UID: \"5a20a60a-9750-49bc-82b3-5d10ea3fc7f5\") " Feb 18 00:38:05 crc kubenswrapper[4791]: I0218 00:38:05.473420 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5a20a60a-9750-49bc-82b3-5d10ea3fc7f5-utilities" (OuterVolumeSpecName: "utilities") pod "5a20a60a-9750-49bc-82b3-5d10ea3fc7f5" (UID: "5a20a60a-9750-49bc-82b3-5d10ea3fc7f5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:38:05 crc kubenswrapper[4791]: I0218 00:38:05.478144 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a20a60a-9750-49bc-82b3-5d10ea3fc7f5-kube-api-access-mpl25" (OuterVolumeSpecName: "kube-api-access-mpl25") pod "5a20a60a-9750-49bc-82b3-5d10ea3fc7f5" (UID: "5a20a60a-9750-49bc-82b3-5d10ea3fc7f5"). InnerVolumeSpecName "kube-api-access-mpl25". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:38:05 crc kubenswrapper[4791]: I0218 00:38:05.480985 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mpl25\" (UniqueName: \"kubernetes.io/projected/5a20a60a-9750-49bc-82b3-5d10ea3fc7f5-kube-api-access-mpl25\") on node \"crc\" DevicePath \"\"" Feb 18 00:38:05 crc kubenswrapper[4791]: I0218 00:38:05.481013 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a20a60a-9750-49bc-82b3-5d10ea3fc7f5-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 00:38:05 crc kubenswrapper[4791]: I0218 00:38:05.599580 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5a20a60a-9750-49bc-82b3-5d10ea3fc7f5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5a20a60a-9750-49bc-82b3-5d10ea3fc7f5" (UID: "5a20a60a-9750-49bc-82b3-5d10ea3fc7f5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:38:05 crc kubenswrapper[4791]: I0218 00:38:05.682897 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a20a60a-9750-49bc-82b3-5d10ea3fc7f5-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 00:38:06 crc kubenswrapper[4791]: I0218 00:38:06.024585 4791 generic.go:334] "Generic (PLEG): container finished" podID="5a20a60a-9750-49bc-82b3-5d10ea3fc7f5" containerID="4b7cd35894336565c619ea0d8bd823da3b3fb59c439444d4e97a2d27c3a2bf3b" exitCode=0 Feb 18 00:38:06 crc kubenswrapper[4791]: I0218 00:38:06.024631 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l97cx" event={"ID":"5a20a60a-9750-49bc-82b3-5d10ea3fc7f5","Type":"ContainerDied","Data":"4b7cd35894336565c619ea0d8bd823da3b3fb59c439444d4e97a2d27c3a2bf3b"} Feb 18 00:38:06 crc kubenswrapper[4791]: I0218 00:38:06.024657 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-l97cx" Feb 18 00:38:06 crc kubenswrapper[4791]: I0218 00:38:06.024680 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l97cx" event={"ID":"5a20a60a-9750-49bc-82b3-5d10ea3fc7f5","Type":"ContainerDied","Data":"7655117cb5e68f77392a25cd3d6fc53978c457c68739c1180d3e8ba5944fd69b"} Feb 18 00:38:06 crc kubenswrapper[4791]: I0218 00:38:06.024705 4791 scope.go:117] "RemoveContainer" containerID="4b7cd35894336565c619ea0d8bd823da3b3fb59c439444d4e97a2d27c3a2bf3b" Feb 18 00:38:06 crc kubenswrapper[4791]: I0218 00:38:06.042905 4791 scope.go:117] "RemoveContainer" containerID="d35a3233c430af6657e8a8a19bbcf2b019cb94561511a6cd6b8d2788c4c4afd5" Feb 18 00:38:06 crc kubenswrapper[4791]: I0218 00:38:06.055046 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-l97cx"] Feb 18 00:38:06 crc kubenswrapper[4791]: I0218 00:38:06.057397 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-l97cx"] Feb 18 00:38:06 crc kubenswrapper[4791]: I0218 00:38:06.076252 4791 scope.go:117] "RemoveContainer" containerID="a6b314c7a5cb0f88973f88db5e24b06234077e1cc66cae05f408b1b7f99a6cd0" Feb 18 00:38:06 crc kubenswrapper[4791]: I0218 00:38:06.087690 4791 scope.go:117] "RemoveContainer" containerID="4b7cd35894336565c619ea0d8bd823da3b3fb59c439444d4e97a2d27c3a2bf3b" Feb 18 00:38:06 crc kubenswrapper[4791]: E0218 00:38:06.088031 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4b7cd35894336565c619ea0d8bd823da3b3fb59c439444d4e97a2d27c3a2bf3b\": container with ID starting with 4b7cd35894336565c619ea0d8bd823da3b3fb59c439444d4e97a2d27c3a2bf3b not found: ID does not exist" containerID="4b7cd35894336565c619ea0d8bd823da3b3fb59c439444d4e97a2d27c3a2bf3b" Feb 18 00:38:06 crc kubenswrapper[4791]: I0218 00:38:06.088121 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b7cd35894336565c619ea0d8bd823da3b3fb59c439444d4e97a2d27c3a2bf3b"} err="failed to get container status \"4b7cd35894336565c619ea0d8bd823da3b3fb59c439444d4e97a2d27c3a2bf3b\": rpc error: code = NotFound desc = could not find container \"4b7cd35894336565c619ea0d8bd823da3b3fb59c439444d4e97a2d27c3a2bf3b\": container with ID starting with 4b7cd35894336565c619ea0d8bd823da3b3fb59c439444d4e97a2d27c3a2bf3b not found: ID does not exist" Feb 18 00:38:06 crc 
kubenswrapper[4791]: I0218 00:38:06.088230 4791 scope.go:117] "RemoveContainer" containerID="d35a3233c430af6657e8a8a19bbcf2b019cb94561511a6cd6b8d2788c4c4afd5" Feb 18 00:38:06 crc kubenswrapper[4791]: E0218 00:38:06.088632 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d35a3233c430af6657e8a8a19bbcf2b019cb94561511a6cd6b8d2788c4c4afd5\": container with ID starting with d35a3233c430af6657e8a8a19bbcf2b019cb94561511a6cd6b8d2788c4c4afd5 not found: ID does not exist" containerID="d35a3233c430af6657e8a8a19bbcf2b019cb94561511a6cd6b8d2788c4c4afd5" Feb 18 00:38:06 crc kubenswrapper[4791]: I0218 00:38:06.088678 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d35a3233c430af6657e8a8a19bbcf2b019cb94561511a6cd6b8d2788c4c4afd5"} err="failed to get container status \"d35a3233c430af6657e8a8a19bbcf2b019cb94561511a6cd6b8d2788c4c4afd5\": rpc error: code = NotFound desc = could not find container \"d35a3233c430af6657e8a8a19bbcf2b019cb94561511a6cd6b8d2788c4c4afd5\": container with ID starting with d35a3233c430af6657e8a8a19bbcf2b019cb94561511a6cd6b8d2788c4c4afd5 not found: ID does not exist" Feb 18 00:38:06 crc kubenswrapper[4791]: I0218 00:38:06.088706 4791 scope.go:117] "RemoveContainer" containerID="a6b314c7a5cb0f88973f88db5e24b06234077e1cc66cae05f408b1b7f99a6cd0" Feb 18 00:38:06 crc kubenswrapper[4791]: E0218 00:38:06.089258 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a6b314c7a5cb0f88973f88db5e24b06234077e1cc66cae05f408b1b7f99a6cd0\": container with ID starting with a6b314c7a5cb0f88973f88db5e24b06234077e1cc66cae05f408b1b7f99a6cd0 not found: ID does not exist" containerID="a6b314c7a5cb0f88973f88db5e24b06234077e1cc66cae05f408b1b7f99a6cd0" Feb 18 00:38:06 crc kubenswrapper[4791]: I0218 00:38:06.089292 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6b314c7a5cb0f88973f88db5e24b06234077e1cc66cae05f408b1b7f99a6cd0"} err="failed to get container status \"a6b314c7a5cb0f88973f88db5e24b06234077e1cc66cae05f408b1b7f99a6cd0\": rpc error: code = NotFound desc = could not find container \"a6b314c7a5cb0f88973f88db5e24b06234077e1cc66cae05f408b1b7f99a6cd0\": container with ID starting with a6b314c7a5cb0f88973f88db5e24b06234077e1cc66cae05f408b1b7f99a6cd0 not found: ID does not exist" Feb 18 00:38:07 crc kubenswrapper[4791]: I0218 00:38:07.074024 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a20a60a-9750-49bc-82b3-5d10ea3fc7f5" path="/var/lib/kubelet/pods/5a20a60a-9750-49bc-82b3-5d10ea3fc7f5/volumes" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.357720 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" podUID="dc760ce4-d81e-4d1e-9341-1d8b40596f4c" containerName="oauth-openshift" containerID="cri-o://3e32f61575b994c217f8f754ac5b33bc89fc89650385d8ec602e00bad0bf6947" gracePeriod=15 Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.727121 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.755864 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-fc667b7f-kngqb"] Feb 18 00:38:14 crc kubenswrapper[4791]: E0218 00:38:14.756050 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f5a9f53-3f5e-44d5-b322-fb0910613ad8" containerName="extract-utilities" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.756062 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f5a9f53-3f5e-44d5-b322-fb0910613ad8" containerName="extract-utilities" Feb 18 00:38:14 crc kubenswrapper[4791]: E0218 00:38:14.756072 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34fbd2ce-81ba-4a98-ba90-982996dec809" containerName="extract-utilities" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.756080 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="34fbd2ce-81ba-4a98-ba90-982996dec809" containerName="extract-utilities" Feb 18 00:38:14 crc kubenswrapper[4791]: E0218 00:38:14.756088 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34fbd2ce-81ba-4a98-ba90-982996dec809" containerName="extract-content" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.756094 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="34fbd2ce-81ba-4a98-ba90-982996dec809" containerName="extract-content" Feb 18 00:38:14 crc kubenswrapper[4791]: E0218 00:38:14.756102 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12887847-f4a6-4fed-b6b5-e30a9c494948" containerName="registry-server" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.756109 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="12887847-f4a6-4fed-b6b5-e30a9c494948" containerName="registry-server" Feb 18 00:38:14 crc kubenswrapper[4791]: E0218 00:38:14.756117 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34fbd2ce-81ba-4a98-ba90-982996dec809" containerName="registry-server" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.756123 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="34fbd2ce-81ba-4a98-ba90-982996dec809" containerName="registry-server" Feb 18 00:38:14 crc kubenswrapper[4791]: E0218 00:38:14.756136 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12887847-f4a6-4fed-b6b5-e30a9c494948" containerName="extract-content" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.756142 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="12887847-f4a6-4fed-b6b5-e30a9c494948" containerName="extract-content" Feb 18 00:38:14 crc kubenswrapper[4791]: E0218 00:38:14.756151 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12887847-f4a6-4fed-b6b5-e30a9c494948" containerName="extract-utilities" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.756169 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="12887847-f4a6-4fed-b6b5-e30a9c494948" containerName="extract-utilities" Feb 18 00:38:14 crc kubenswrapper[4791]: E0218 00:38:14.756178 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a20a60a-9750-49bc-82b3-5d10ea3fc7f5" containerName="registry-server" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.756183 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a20a60a-9750-49bc-82b3-5d10ea3fc7f5" containerName="registry-server" Feb 18 00:38:14 crc kubenswrapper[4791]: E0218 00:38:14.756190 4791 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="5a20a60a-9750-49bc-82b3-5d10ea3fc7f5" containerName="extract-utilities" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.756195 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a20a60a-9750-49bc-82b3-5d10ea3fc7f5" containerName="extract-utilities" Feb 18 00:38:14 crc kubenswrapper[4791]: E0218 00:38:14.756203 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc760ce4-d81e-4d1e-9341-1d8b40596f4c" containerName="oauth-openshift" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.756209 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc760ce4-d81e-4d1e-9341-1d8b40596f4c" containerName="oauth-openshift" Feb 18 00:38:14 crc kubenswrapper[4791]: E0218 00:38:14.756217 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f5a9f53-3f5e-44d5-b322-fb0910613ad8" containerName="extract-content" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.756222 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f5a9f53-3f5e-44d5-b322-fb0910613ad8" containerName="extract-content" Feb 18 00:38:14 crc kubenswrapper[4791]: E0218 00:38:14.756230 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f5a9f53-3f5e-44d5-b322-fb0910613ad8" containerName="registry-server" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.756236 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f5a9f53-3f5e-44d5-b322-fb0910613ad8" containerName="registry-server" Feb 18 00:38:14 crc kubenswrapper[4791]: E0218 00:38:14.756244 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a20a60a-9750-49bc-82b3-5d10ea3fc7f5" containerName="extract-content" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.756250 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a20a60a-9750-49bc-82b3-5d10ea3fc7f5" containerName="extract-content" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.756329 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f5a9f53-3f5e-44d5-b322-fb0910613ad8" containerName="registry-server" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.756339 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc760ce4-d81e-4d1e-9341-1d8b40596f4c" containerName="oauth-openshift" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.756353 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a20a60a-9750-49bc-82b3-5d10ea3fc7f5" containerName="registry-server" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.756360 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="34fbd2ce-81ba-4a98-ba90-982996dec809" containerName="registry-server" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.756368 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="12887847-f4a6-4fed-b6b5-e30a9c494948" containerName="registry-server" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.756753 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.768448 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-fc667b7f-kngqb"] Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.839478 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-cliconfig\") pod \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.839532 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-audit-policies\") pod \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.839568 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-router-certs\") pod \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.839585 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-user-template-error\") pod \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.839606 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-user-template-provider-selection\") pod \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.839627 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7kgdr\" (UniqueName: \"kubernetes.io/projected/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-kube-api-access-7kgdr\") pod \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.839656 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-service-ca\") pod \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.839678 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-audit-dir\") pod \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.839698 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-trusted-ca-bundle\") pod \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.839714 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-session\") pod \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.839745 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-ocp-branding-template\") pod \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.839805 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "dc760ce4-d81e-4d1e-9341-1d8b40596f4c" (UID: "dc760ce4-d81e-4d1e-9341-1d8b40596f4c"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.840606 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "dc760ce4-d81e-4d1e-9341-1d8b40596f4c" (UID: "dc760ce4-d81e-4d1e-9341-1d8b40596f4c"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.840642 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "dc760ce4-d81e-4d1e-9341-1d8b40596f4c" (UID: "dc760ce4-d81e-4d1e-9341-1d8b40596f4c"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.840659 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "dc760ce4-d81e-4d1e-9341-1d8b40596f4c" (UID: "dc760ce4-d81e-4d1e-9341-1d8b40596f4c"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.840773 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "dc760ce4-d81e-4d1e-9341-1d8b40596f4c" (UID: "dc760ce4-d81e-4d1e-9341-1d8b40596f4c"). InnerVolumeSpecName "v4-0-config-system-service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.840800 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-user-template-login\") pod \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.840846 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-user-idp-0-file-data\") pod \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.840875 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-serving-cert\") pod \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\" (UID: \"dc760ce4-d81e-4d1e-9341-1d8b40596f4c\") " Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.841058 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/89f4085e-cd33-4c33-8a20-6bb1af2c364d-audit-policies\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.841107 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/89f4085e-cd33-4c33-8a20-6bb1af2c364d-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.841151 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/89f4085e-cd33-4c33-8a20-6bb1af2c364d-v4-0-config-system-serving-cert\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.841230 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-txpr7\" (UniqueName: \"kubernetes.io/projected/89f4085e-cd33-4c33-8a20-6bb1af2c364d-kube-api-access-txpr7\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.841284 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/89f4085e-cd33-4c33-8a20-6bb1af2c364d-v4-0-config-system-router-certs\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.841320 4791 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/89f4085e-cd33-4c33-8a20-6bb1af2c364d-audit-dir\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.841358 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/89f4085e-cd33-4c33-8a20-6bb1af2c364d-v4-0-config-user-template-error\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.841380 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/89f4085e-cd33-4c33-8a20-6bb1af2c364d-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.841432 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/89f4085e-cd33-4c33-8a20-6bb1af2c364d-v4-0-config-system-session\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.841483 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/89f4085e-cd33-4c33-8a20-6bb1af2c364d-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.841512 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/89f4085e-cd33-4c33-8a20-6bb1af2c364d-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.841545 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/89f4085e-cd33-4c33-8a20-6bb1af2c364d-v4-0-config-system-cliconfig\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.841660 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/89f4085e-cd33-4c33-8a20-6bb1af2c364d-v4-0-config-system-service-ca\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " 
pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.841685 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/89f4085e-cd33-4c33-8a20-6bb1af2c364d-v4-0-config-user-template-login\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.841771 4791 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.841786 4791 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-audit-policies\") on node \"crc\" DevicePath \"\"" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.841800 4791 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.841813 4791 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-audit-dir\") on node \"crc\" DevicePath \"\"" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.841826 4791 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.845221 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-kube-api-access-7kgdr" (OuterVolumeSpecName: "kube-api-access-7kgdr") pod "dc760ce4-d81e-4d1e-9341-1d8b40596f4c" (UID: "dc760ce4-d81e-4d1e-9341-1d8b40596f4c"). InnerVolumeSpecName "kube-api-access-7kgdr". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.845911 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "dc760ce4-d81e-4d1e-9341-1d8b40596f4c" (UID: "dc760ce4-d81e-4d1e-9341-1d8b40596f4c"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.846627 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "dc760ce4-d81e-4d1e-9341-1d8b40596f4c" (UID: "dc760ce4-d81e-4d1e-9341-1d8b40596f4c"). InnerVolumeSpecName "v4-0-config-user-template-login". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.846743 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "dc760ce4-d81e-4d1e-9341-1d8b40596f4c" (UID: "dc760ce4-d81e-4d1e-9341-1d8b40596f4c"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.847314 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "dc760ce4-d81e-4d1e-9341-1d8b40596f4c" (UID: "dc760ce4-d81e-4d1e-9341-1d8b40596f4c"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.848112 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "dc760ce4-d81e-4d1e-9341-1d8b40596f4c" (UID: "dc760ce4-d81e-4d1e-9341-1d8b40596f4c"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.848343 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "dc760ce4-d81e-4d1e-9341-1d8b40596f4c" (UID: "dc760ce4-d81e-4d1e-9341-1d8b40596f4c"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.850369 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "dc760ce4-d81e-4d1e-9341-1d8b40596f4c" (UID: "dc760ce4-d81e-4d1e-9341-1d8b40596f4c"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.850635 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "dc760ce4-d81e-4d1e-9341-1d8b40596f4c" (UID: "dc760ce4-d81e-4d1e-9341-1d8b40596f4c"). InnerVolumeSpecName "v4-0-config-system-session". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.943150 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/89f4085e-cd33-4c33-8a20-6bb1af2c364d-v4-0-config-system-session\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.943246 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/89f4085e-cd33-4c33-8a20-6bb1af2c364d-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.943274 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/89f4085e-cd33-4c33-8a20-6bb1af2c364d-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.943300 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/89f4085e-cd33-4c33-8a20-6bb1af2c364d-v4-0-config-system-cliconfig\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.943341 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/89f4085e-cd33-4c33-8a20-6bb1af2c364d-v4-0-config-system-service-ca\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.943367 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/89f4085e-cd33-4c33-8a20-6bb1af2c364d-v4-0-config-user-template-login\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.943412 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/89f4085e-cd33-4c33-8a20-6bb1af2c364d-audit-policies\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.943436 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/89f4085e-cd33-4c33-8a20-6bb1af2c364d-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " 
pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.943457 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/89f4085e-cd33-4c33-8a20-6bb1af2c364d-v4-0-config-system-serving-cert\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.943476 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-txpr7\" (UniqueName: \"kubernetes.io/projected/89f4085e-cd33-4c33-8a20-6bb1af2c364d-kube-api-access-txpr7\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.943505 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/89f4085e-cd33-4c33-8a20-6bb1af2c364d-v4-0-config-system-router-certs\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.943527 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/89f4085e-cd33-4c33-8a20-6bb1af2c364d-audit-dir\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.943545 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/89f4085e-cd33-4c33-8a20-6bb1af2c364d-v4-0-config-user-template-error\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.943570 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/89f4085e-cd33-4c33-8a20-6bb1af2c364d-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.943620 4791 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.943636 4791 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.943653 4791 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:38:14 
crc kubenswrapper[4791]: I0218 00:38:14.943670 4791 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.943688 4791 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.943704 4791 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.943723 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7kgdr\" (UniqueName: \"kubernetes.io/projected/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-kube-api-access-7kgdr\") on node \"crc\" DevicePath \"\"" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.943741 4791 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.943757 4791 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/dc760ce4-d81e-4d1e-9341-1d8b40596f4c-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.944500 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/89f4085e-cd33-4c33-8a20-6bb1af2c364d-audit-dir\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.945053 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/89f4085e-cd33-4c33-8a20-6bb1af2c364d-v4-0-config-system-cliconfig\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.945076 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/89f4085e-cd33-4c33-8a20-6bb1af2c364d-v4-0-config-system-service-ca\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.946077 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/89f4085e-cd33-4c33-8a20-6bb1af2c364d-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.946993 4791 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/89f4085e-cd33-4c33-8a20-6bb1af2c364d-audit-policies\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.947041 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/89f4085e-cd33-4c33-8a20-6bb1af2c364d-v4-0-config-system-session\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.948008 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/89f4085e-cd33-4c33-8a20-6bb1af2c364d-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.948724 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/89f4085e-cd33-4c33-8a20-6bb1af2c364d-v4-0-config-system-serving-cert\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.948899 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/89f4085e-cd33-4c33-8a20-6bb1af2c364d-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.949146 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/89f4085e-cd33-4c33-8a20-6bb1af2c364d-v4-0-config-user-template-error\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.949211 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/89f4085e-cd33-4c33-8a20-6bb1af2c364d-v4-0-config-user-template-login\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.950908 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/89f4085e-cd33-4c33-8a20-6bb1af2c364d-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.956521 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/89f4085e-cd33-4c33-8a20-6bb1af2c364d-v4-0-config-system-router-certs\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:14 crc kubenswrapper[4791]: I0218 00:38:14.970089 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-txpr7\" (UniqueName: \"kubernetes.io/projected/89f4085e-cd33-4c33-8a20-6bb1af2c364d-kube-api-access-txpr7\") pod \"oauth-openshift-fc667b7f-kngqb\" (UID: \"89f4085e-cd33-4c33-8a20-6bb1af2c364d\") " pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:15 crc kubenswrapper[4791]: I0218 00:38:15.076006 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:15 crc kubenswrapper[4791]: I0218 00:38:15.111596 4791 generic.go:334] "Generic (PLEG): container finished" podID="dc760ce4-d81e-4d1e-9341-1d8b40596f4c" containerID="3e32f61575b994c217f8f754ac5b33bc89fc89650385d8ec602e00bad0bf6947" exitCode=0 Feb 18 00:38:15 crc kubenswrapper[4791]: I0218 00:38:15.111727 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" Feb 18 00:38:15 crc kubenswrapper[4791]: I0218 00:38:15.111762 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" event={"ID":"dc760ce4-d81e-4d1e-9341-1d8b40596f4c","Type":"ContainerDied","Data":"3e32f61575b994c217f8f754ac5b33bc89fc89650385d8ec602e00bad0bf6947"} Feb 18 00:38:15 crc kubenswrapper[4791]: I0218 00:38:15.112115 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-gxtkn" event={"ID":"dc760ce4-d81e-4d1e-9341-1d8b40596f4c","Type":"ContainerDied","Data":"c52bf3b8041eb179d87b05e9a2a9f7ee6f9dc8f453e6c7095c46e8d3a1d4ffd8"} Feb 18 00:38:15 crc kubenswrapper[4791]: I0218 00:38:15.112191 4791 scope.go:117] "RemoveContainer" containerID="3e32f61575b994c217f8f754ac5b33bc89fc89650385d8ec602e00bad0bf6947" Feb 18 00:38:15 crc kubenswrapper[4791]: I0218 00:38:15.150068 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-gxtkn"] Feb 18 00:38:15 crc kubenswrapper[4791]: I0218 00:38:15.155539 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-gxtkn"] Feb 18 00:38:15 crc kubenswrapper[4791]: I0218 00:38:15.166920 4791 scope.go:117] "RemoveContainer" containerID="3e32f61575b994c217f8f754ac5b33bc89fc89650385d8ec602e00bad0bf6947" Feb 18 00:38:15 crc kubenswrapper[4791]: E0218 00:38:15.167639 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3e32f61575b994c217f8f754ac5b33bc89fc89650385d8ec602e00bad0bf6947\": container with ID starting with 3e32f61575b994c217f8f754ac5b33bc89fc89650385d8ec602e00bad0bf6947 not found: ID does not exist" containerID="3e32f61575b994c217f8f754ac5b33bc89fc89650385d8ec602e00bad0bf6947" Feb 18 00:38:15 crc kubenswrapper[4791]: I0218 00:38:15.167688 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3e32f61575b994c217f8f754ac5b33bc89fc89650385d8ec602e00bad0bf6947"} err="failed to get container status 
\"3e32f61575b994c217f8f754ac5b33bc89fc89650385d8ec602e00bad0bf6947\": rpc error: code = NotFound desc = could not find container \"3e32f61575b994c217f8f754ac5b33bc89fc89650385d8ec602e00bad0bf6947\": container with ID starting with 3e32f61575b994c217f8f754ac5b33bc89fc89650385d8ec602e00bad0bf6947 not found: ID does not exist" Feb 18 00:38:15 crc kubenswrapper[4791]: I0218 00:38:15.317825 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-fc667b7f-kngqb"] Feb 18 00:38:15 crc kubenswrapper[4791]: W0218 00:38:15.322699 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod89f4085e_cd33_4c33_8a20_6bb1af2c364d.slice/crio-1abb8be3686cd6321b39fccab901924569cbf2da572f7e5f2ac0d0190f0536db WatchSource:0}: Error finding container 1abb8be3686cd6321b39fccab901924569cbf2da572f7e5f2ac0d0190f0536db: Status 404 returned error can't find the container with id 1abb8be3686cd6321b39fccab901924569cbf2da572f7e5f2ac0d0190f0536db Feb 18 00:38:16 crc kubenswrapper[4791]: I0218 00:38:16.117664 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" event={"ID":"89f4085e-cd33-4c33-8a20-6bb1af2c364d","Type":"ContainerStarted","Data":"43fba396a5c8a437789f204237078c9dd8c892bb403a66364d64bc919164a4f6"} Feb 18 00:38:16 crc kubenswrapper[4791]: I0218 00:38:16.118802 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" event={"ID":"89f4085e-cd33-4c33-8a20-6bb1af2c364d","Type":"ContainerStarted","Data":"1abb8be3686cd6321b39fccab901924569cbf2da572f7e5f2ac0d0190f0536db"} Feb 18 00:38:16 crc kubenswrapper[4791]: I0218 00:38:16.118884 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:16 crc kubenswrapper[4791]: I0218 00:38:16.145754 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" podStartSLOduration=27.145740856 podStartE2EDuration="27.145740856s" podCreationTimestamp="2026-02-18 00:37:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:38:16.14367791 +0000 UTC m=+237.711691100" watchObservedRunningTime="2026-02-18 00:38:16.145740856 +0000 UTC m=+237.713754026" Feb 18 00:38:16 crc kubenswrapper[4791]: I0218 00:38:16.278921 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-fc667b7f-kngqb" Feb 18 00:38:17 crc kubenswrapper[4791]: I0218 00:38:17.067979 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc760ce4-d81e-4d1e-9341-1d8b40596f4c" path="/var/lib/kubelet/pods/dc760ce4-d81e-4d1e-9341-1d8b40596f4c/volumes" Feb 18 00:38:21 crc kubenswrapper[4791]: I0218 00:38:21.924883 4791 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Feb 18 00:38:21 crc kubenswrapper[4791]: I0218 00:38:21.926033 4791 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Feb 18 00:38:21 crc kubenswrapper[4791]: I0218 00:38:21.926192 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 18 00:38:21 crc kubenswrapper[4791]: I0218 00:38:21.926594 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f" gracePeriod=15 Feb 18 00:38:21 crc kubenswrapper[4791]: I0218 00:38:21.926659 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02" gracePeriod=15 Feb 18 00:38:21 crc kubenswrapper[4791]: I0218 00:38:21.926600 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://e5c41479d5089b48a439d4ea96fb7d33c0512e1c8a5d0c2c845eeda0eec6e789" gracePeriod=15 Feb 18 00:38:21 crc kubenswrapper[4791]: I0218 00:38:21.926780 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70" gracePeriod=15 Feb 18 00:38:21 crc kubenswrapper[4791]: I0218 00:38:21.926552 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16" gracePeriod=15 Feb 18 00:38:21 crc kubenswrapper[4791]: I0218 00:38:21.927333 4791 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Feb 18 00:38:21 crc kubenswrapper[4791]: E0218 00:38:21.927613 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 18 00:38:21 crc kubenswrapper[4791]: I0218 00:38:21.927628 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 18 00:38:21 crc kubenswrapper[4791]: E0218 00:38:21.927642 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Feb 18 00:38:21 crc kubenswrapper[4791]: I0218 00:38:21.927651 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Feb 18 00:38:21 crc kubenswrapper[4791]: E0218 00:38:21.927680 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Feb 18 00:38:21 crc kubenswrapper[4791]: I0218 00:38:21.927690 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Feb 18 00:38:21 crc kubenswrapper[4791]: E0218 00:38:21.927710 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" 
containerName="kube-apiserver-check-endpoints" Feb 18 00:38:21 crc kubenswrapper[4791]: I0218 00:38:21.927719 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 18 00:38:21 crc kubenswrapper[4791]: E0218 00:38:21.927737 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Feb 18 00:38:21 crc kubenswrapper[4791]: I0218 00:38:21.927746 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Feb 18 00:38:21 crc kubenswrapper[4791]: E0218 00:38:21.927762 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Feb 18 00:38:21 crc kubenswrapper[4791]: I0218 00:38:21.927772 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Feb 18 00:38:21 crc kubenswrapper[4791]: E0218 00:38:21.927784 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Feb 18 00:38:21 crc kubenswrapper[4791]: I0218 00:38:21.927794 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Feb 18 00:38:21 crc kubenswrapper[4791]: I0218 00:38:21.928000 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Feb 18 00:38:21 crc kubenswrapper[4791]: I0218 00:38:21.928013 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 18 00:38:21 crc kubenswrapper[4791]: I0218 00:38:21.928025 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 18 00:38:21 crc kubenswrapper[4791]: I0218 00:38:21.928036 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Feb 18 00:38:21 crc kubenswrapper[4791]: I0218 00:38:21.928045 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 18 00:38:21 crc kubenswrapper[4791]: I0218 00:38:21.928054 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Feb 18 00:38:21 crc kubenswrapper[4791]: I0218 00:38:21.928066 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Feb 18 00:38:21 crc kubenswrapper[4791]: E0218 00:38:21.928235 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 18 00:38:21 crc kubenswrapper[4791]: I0218 00:38:21.928249 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 18 00:38:22 crc kubenswrapper[4791]: I0218 00:38:22.055674 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 18 00:38:22 crc kubenswrapper[4791]: I0218 00:38:22.055908 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:38:22 crc kubenswrapper[4791]: I0218 00:38:22.055936 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 18 00:38:22 crc kubenswrapper[4791]: I0218 00:38:22.055958 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 18 00:38:22 crc kubenswrapper[4791]: I0218 00:38:22.055976 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:38:22 crc kubenswrapper[4791]: I0218 00:38:22.055995 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 18 00:38:22 crc kubenswrapper[4791]: I0218 00:38:22.056016 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:38:22 crc kubenswrapper[4791]: I0218 00:38:22.056041 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 18 00:38:22 crc kubenswrapper[4791]: I0218 00:38:22.152360 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Feb 18 00:38:22 crc kubenswrapper[4791]: I0218 00:38:22.153636 4791 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Feb 18 00:38:22 crc kubenswrapper[4791]: I0218 00:38:22.154322 4791 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="e5c41479d5089b48a439d4ea96fb7d33c0512e1c8a5d0c2c845eeda0eec6e789" exitCode=0 Feb 18 00:38:22 crc kubenswrapper[4791]: I0218 00:38:22.154346 4791 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f" exitCode=0 Feb 18 00:38:22 crc kubenswrapper[4791]: I0218 00:38:22.154353 4791 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02" exitCode=0 Feb 18 00:38:22 crc kubenswrapper[4791]: I0218 00:38:22.154361 4791 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70" exitCode=2 Feb 18 00:38:22 crc kubenswrapper[4791]: I0218 00:38:22.154374 4791 scope.go:117] "RemoveContainer" containerID="05d77f463624a6220851972dbbbf6f85bc850729e25e2bb6e1d9e57a1cf9bcc8" Feb 18 00:38:22 crc kubenswrapper[4791]: I0218 00:38:22.156188 4791 generic.go:334] "Generic (PLEG): container finished" podID="3fd8eea0-19ca-4527-9a36-2d7cd67fba45" containerID="8cf7da1bf157d81bb8aa1be96a661711d9e33d35942f2a7466eb0fc1b61daa06" exitCode=0 Feb 18 00:38:22 crc kubenswrapper[4791]: I0218 00:38:22.156238 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"3fd8eea0-19ca-4527-9a36-2d7cd67fba45","Type":"ContainerDied","Data":"8cf7da1bf157d81bb8aa1be96a661711d9e33d35942f2a7466eb0fc1b61daa06"} Feb 18 00:38:22 crc kubenswrapper[4791]: I0218 00:38:22.156787 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 18 00:38:22 crc kubenswrapper[4791]: I0218 00:38:22.156892 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 18 00:38:22 crc kubenswrapper[4791]: I0218 00:38:22.157202 4791 status_manager.go:851] "Failed to get status for pod" podUID="3fd8eea0-19ca-4527-9a36-2d7cd67fba45" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Feb 18 00:38:22 crc kubenswrapper[4791]: I0218 00:38:22.157589 4791 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Feb 18 00:38:22 crc kubenswrapper[4791]: I0218 00:38:22.157698 4791 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:38:22 crc kubenswrapper[4791]: I0218 00:38:22.157756 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 18 00:38:22 crc kubenswrapper[4791]: I0218 00:38:22.157801 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 18 00:38:22 crc kubenswrapper[4791]: I0218 00:38:22.157840 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:38:22 crc kubenswrapper[4791]: I0218 00:38:22.157854 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:38:22 crc kubenswrapper[4791]: I0218 00:38:22.157864 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 18 00:38:22 crc kubenswrapper[4791]: I0218 00:38:22.157885 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 18 00:38:22 crc kubenswrapper[4791]: I0218 00:38:22.157930 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:38:22 crc kubenswrapper[4791]: I0218 00:38:22.157989 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 18 00:38:22 crc kubenswrapper[4791]: I0218 00:38:22.158049 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: 
\"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 18 00:38:22 crc kubenswrapper[4791]: I0218 00:38:22.158228 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 18 00:38:22 crc kubenswrapper[4791]: I0218 00:38:22.158269 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:38:22 crc kubenswrapper[4791]: I0218 00:38:22.158275 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:38:22 crc kubenswrapper[4791]: I0218 00:38:22.158278 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 18 00:38:23 crc kubenswrapper[4791]: I0218 00:38:23.165502 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Feb 18 00:38:23 crc kubenswrapper[4791]: I0218 00:38:23.478199 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Feb 18 00:38:23 crc kubenswrapper[4791]: I0218 00:38:23.479141 4791 status_manager.go:851] "Failed to get status for pod" podUID="3fd8eea0-19ca-4527-9a36-2d7cd67fba45" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Feb 18 00:38:23 crc kubenswrapper[4791]: I0218 00:38:23.575484 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3fd8eea0-19ca-4527-9a36-2d7cd67fba45-kube-api-access\") pod \"3fd8eea0-19ca-4527-9a36-2d7cd67fba45\" (UID: \"3fd8eea0-19ca-4527-9a36-2d7cd67fba45\") " Feb 18 00:38:23 crc kubenswrapper[4791]: I0218 00:38:23.575549 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3fd8eea0-19ca-4527-9a36-2d7cd67fba45-kubelet-dir\") pod \"3fd8eea0-19ca-4527-9a36-2d7cd67fba45\" (UID: \"3fd8eea0-19ca-4527-9a36-2d7cd67fba45\") " Feb 18 00:38:23 crc kubenswrapper[4791]: I0218 00:38:23.575621 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/3fd8eea0-19ca-4527-9a36-2d7cd67fba45-var-lock\") pod \"3fd8eea0-19ca-4527-9a36-2d7cd67fba45\" (UID: \"3fd8eea0-19ca-4527-9a36-2d7cd67fba45\") " Feb 18 00:38:23 crc kubenswrapper[4791]: I0218 00:38:23.575770 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3fd8eea0-19ca-4527-9a36-2d7cd67fba45-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "3fd8eea0-19ca-4527-9a36-2d7cd67fba45" (UID: "3fd8eea0-19ca-4527-9a36-2d7cd67fba45"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:38:23 crc kubenswrapper[4791]: I0218 00:38:23.575842 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3fd8eea0-19ca-4527-9a36-2d7cd67fba45-var-lock" (OuterVolumeSpecName: "var-lock") pod "3fd8eea0-19ca-4527-9a36-2d7cd67fba45" (UID: "3fd8eea0-19ca-4527-9a36-2d7cd67fba45"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:38:23 crc kubenswrapper[4791]: I0218 00:38:23.576075 4791 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/3fd8eea0-19ca-4527-9a36-2d7cd67fba45-var-lock\") on node \"crc\" DevicePath \"\"" Feb 18 00:38:23 crc kubenswrapper[4791]: I0218 00:38:23.576103 4791 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3fd8eea0-19ca-4527-9a36-2d7cd67fba45-kubelet-dir\") on node \"crc\" DevicePath \"\"" Feb 18 00:38:23 crc kubenswrapper[4791]: I0218 00:38:23.583894 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3fd8eea0-19ca-4527-9a36-2d7cd67fba45-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "3fd8eea0-19ca-4527-9a36-2d7cd67fba45" (UID: "3fd8eea0-19ca-4527-9a36-2d7cd67fba45"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:38:23 crc kubenswrapper[4791]: I0218 00:38:23.677125 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3fd8eea0-19ca-4527-9a36-2d7cd67fba45-kube-api-access\") on node \"crc\" DevicePath \"\"" Feb 18 00:38:24 crc kubenswrapper[4791]: I0218 00:38:24.173211 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"3fd8eea0-19ca-4527-9a36-2d7cd67fba45","Type":"ContainerDied","Data":"2b77534550452c38b532dca33d90e358b3dec048557d8e58d4545ae580b19fb9"} Feb 18 00:38:24 crc kubenswrapper[4791]: I0218 00:38:24.173255 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2b77534550452c38b532dca33d90e358b3dec048557d8e58d4545ae580b19fb9" Feb 18 00:38:24 crc kubenswrapper[4791]: I0218 00:38:24.173273 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Feb 18 00:38:24 crc kubenswrapper[4791]: I0218 00:38:24.275397 4791 status_manager.go:851] "Failed to get status for pod" podUID="3fd8eea0-19ca-4527-9a36-2d7cd67fba45" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Feb 18 00:38:24 crc kubenswrapper[4791]: I0218 00:38:24.279522 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Feb 18 00:38:24 crc kubenswrapper[4791]: I0218 00:38:24.280302 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:38:24 crc kubenswrapper[4791]: I0218 00:38:24.280917 4791 status_manager.go:851] "Failed to get status for pod" podUID="3fd8eea0-19ca-4527-9a36-2d7cd67fba45" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Feb 18 00:38:24 crc kubenswrapper[4791]: I0218 00:38:24.281478 4791 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Feb 18 00:38:24 crc kubenswrapper[4791]: I0218 00:38:24.392761 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Feb 18 00:38:24 crc kubenswrapper[4791]: I0218 00:38:24.392864 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Feb 18 00:38:24 crc kubenswrapper[4791]: I0218 00:38:24.392888 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:38:24 crc kubenswrapper[4791]: I0218 00:38:24.392948 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:38:24 crc kubenswrapper[4791]: I0218 00:38:24.392986 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Feb 18 00:38:24 crc kubenswrapper[4791]: I0218 00:38:24.393086 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:38:24 crc kubenswrapper[4791]: I0218 00:38:24.393296 4791 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Feb 18 00:38:24 crc kubenswrapper[4791]: I0218 00:38:24.393338 4791 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Feb 18 00:38:24 crc kubenswrapper[4791]: I0218 00:38:24.393359 4791 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Feb 18 00:38:25 crc kubenswrapper[4791]: I0218 00:38:25.067681 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Feb 18 00:38:25 crc kubenswrapper[4791]: I0218 00:38:25.181999 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Feb 18 00:38:25 crc kubenswrapper[4791]: I0218 00:38:25.182992 4791 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16" exitCode=0 Feb 18 00:38:25 crc kubenswrapper[4791]: I0218 00:38:25.183073 4791 scope.go:117] "RemoveContainer" containerID="e5c41479d5089b48a439d4ea96fb7d33c0512e1c8a5d0c2c845eeda0eec6e789" Feb 18 00:38:25 crc kubenswrapper[4791]: I0218 00:38:25.183333 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:38:25 crc kubenswrapper[4791]: I0218 00:38:25.183935 4791 status_manager.go:851] "Failed to get status for pod" podUID="3fd8eea0-19ca-4527-9a36-2d7cd67fba45" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Feb 18 00:38:25 crc kubenswrapper[4791]: I0218 00:38:25.185097 4791 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Feb 18 00:38:25 crc kubenswrapper[4791]: I0218 00:38:25.187673 4791 status_manager.go:851] "Failed to get status for pod" podUID="3fd8eea0-19ca-4527-9a36-2d7cd67fba45" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Feb 18 00:38:25 crc kubenswrapper[4791]: I0218 00:38:25.188362 4791 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Feb 18 00:38:25 crc kubenswrapper[4791]: I0218 00:38:25.203195 4791 scope.go:117] "RemoveContainer" containerID="440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f" Feb 18 00:38:25 crc kubenswrapper[4791]: I0218 00:38:25.217023 4791 scope.go:117] "RemoveContainer" containerID="8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02" Feb 18 00:38:25 crc kubenswrapper[4791]: I0218 00:38:25.232181 4791 scope.go:117] "RemoveContainer" containerID="d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70" Feb 18 00:38:25 crc kubenswrapper[4791]: I0218 00:38:25.249331 4791 scope.go:117] "RemoveContainer" containerID="38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16" Feb 18 00:38:25 crc kubenswrapper[4791]: I0218 00:38:25.265098 4791 scope.go:117] "RemoveContainer" containerID="c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78" Feb 18 00:38:25 crc kubenswrapper[4791]: I0218 00:38:25.288822 4791 scope.go:117] "RemoveContainer" containerID="e5c41479d5089b48a439d4ea96fb7d33c0512e1c8a5d0c2c845eeda0eec6e789" Feb 18 00:38:25 crc kubenswrapper[4791]: E0218 00:38:25.289432 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e5c41479d5089b48a439d4ea96fb7d33c0512e1c8a5d0c2c845eeda0eec6e789\": container with ID starting with e5c41479d5089b48a439d4ea96fb7d33c0512e1c8a5d0c2c845eeda0eec6e789 not found: ID does not exist" containerID="e5c41479d5089b48a439d4ea96fb7d33c0512e1c8a5d0c2c845eeda0eec6e789" Feb 18 00:38:25 crc kubenswrapper[4791]: I0218 00:38:25.289488 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5c41479d5089b48a439d4ea96fb7d33c0512e1c8a5d0c2c845eeda0eec6e789"} err="failed to get container status \"e5c41479d5089b48a439d4ea96fb7d33c0512e1c8a5d0c2c845eeda0eec6e789\": rpc error: code = NotFound desc = could not find container 
\"e5c41479d5089b48a439d4ea96fb7d33c0512e1c8a5d0c2c845eeda0eec6e789\": container with ID starting with e5c41479d5089b48a439d4ea96fb7d33c0512e1c8a5d0c2c845eeda0eec6e789 not found: ID does not exist" Feb 18 00:38:25 crc kubenswrapper[4791]: I0218 00:38:25.289523 4791 scope.go:117] "RemoveContainer" containerID="440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f" Feb 18 00:38:25 crc kubenswrapper[4791]: E0218 00:38:25.290147 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\": container with ID starting with 440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f not found: ID does not exist" containerID="440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f" Feb 18 00:38:25 crc kubenswrapper[4791]: I0218 00:38:25.290283 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f"} err="failed to get container status \"440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\": rpc error: code = NotFound desc = could not find container \"440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f\": container with ID starting with 440cf55c5c7ca86593eddb1e51d7e66e3ae0930c9615e531ed833779efdfb20f not found: ID does not exist" Feb 18 00:38:25 crc kubenswrapper[4791]: I0218 00:38:25.290324 4791 scope.go:117] "RemoveContainer" containerID="8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02" Feb 18 00:38:25 crc kubenswrapper[4791]: E0218 00:38:25.290841 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\": container with ID starting with 8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02 not found: ID does not exist" containerID="8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02" Feb 18 00:38:25 crc kubenswrapper[4791]: I0218 00:38:25.290864 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02"} err="failed to get container status \"8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\": rpc error: code = NotFound desc = could not find container \"8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02\": container with ID starting with 8fcc323745600ce50e2cd60d5ea63df95869dc346fdb2087c58dfd52bd4dcd02 not found: ID does not exist" Feb 18 00:38:25 crc kubenswrapper[4791]: I0218 00:38:25.290878 4791 scope.go:117] "RemoveContainer" containerID="d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70" Feb 18 00:38:25 crc kubenswrapper[4791]: E0218 00:38:25.291260 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\": container with ID starting with d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70 not found: ID does not exist" containerID="d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70" Feb 18 00:38:25 crc kubenswrapper[4791]: I0218 00:38:25.291282 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70"} 
err="failed to get container status \"d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\": rpc error: code = NotFound desc = could not find container \"d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70\": container with ID starting with d52d21670361a6373c544dc4dd7456f83644b0cc0065da08c7505a1553d41b70 not found: ID does not exist" Feb 18 00:38:25 crc kubenswrapper[4791]: I0218 00:38:25.291294 4791 scope.go:117] "RemoveContainer" containerID="38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16" Feb 18 00:38:25 crc kubenswrapper[4791]: E0218 00:38:25.291557 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\": container with ID starting with 38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16 not found: ID does not exist" containerID="38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16" Feb 18 00:38:25 crc kubenswrapper[4791]: I0218 00:38:25.291582 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16"} err="failed to get container status \"38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\": rpc error: code = NotFound desc = could not find container \"38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16\": container with ID starting with 38280f004168456f1d406d27d4ad6d205a1f5becb39f439628e0451323b63a16 not found: ID does not exist" Feb 18 00:38:25 crc kubenswrapper[4791]: I0218 00:38:25.291600 4791 scope.go:117] "RemoveContainer" containerID="c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78" Feb 18 00:38:25 crc kubenswrapper[4791]: E0218 00:38:25.291874 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\": container with ID starting with c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78 not found: ID does not exist" containerID="c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78" Feb 18 00:38:25 crc kubenswrapper[4791]: I0218 00:38:25.291900 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78"} err="failed to get container status \"c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\": rpc error: code = NotFound desc = could not find container \"c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78\": container with ID starting with c753a4f804c139ec555792a520babc19ea0fcd1daae7d4ecea464a73abbf2f78 not found: ID does not exist" Feb 18 00:38:26 crc kubenswrapper[4791]: E0218 00:38:26.961272 4791 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.113:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 18 00:38:26 crc kubenswrapper[4791]: I0218 00:38:26.961861 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 18 00:38:26 crc kubenswrapper[4791]: E0218 00:38:26.989710 4791 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.113:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.18953046500b6821 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-18 00:38:26.988927009 +0000 UTC m=+248.556940189,LastTimestamp:2026-02-18 00:38:26.988927009 +0000 UTC m=+248.556940189,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 18 00:38:27 crc kubenswrapper[4791]: I0218 00:38:27.193603 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"ec45adbbb544f0ac1f134bddfbafbf08ace6847ee9f36b8364b91068b2a1f008"} Feb 18 00:38:28 crc kubenswrapper[4791]: I0218 00:38:28.200925 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"a59fb2f157bfea4975b46639f7f5235a33771e95ef49d35496b24dd8897b86ba"} Feb 18 00:38:28 crc kubenswrapper[4791]: I0218 00:38:28.201679 4791 status_manager.go:851] "Failed to get status for pod" podUID="3fd8eea0-19ca-4527-9a36-2d7cd67fba45" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Feb 18 00:38:28 crc kubenswrapper[4791]: E0218 00:38:28.201839 4791 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.113:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 18 00:38:29 crc kubenswrapper[4791]: I0218 00:38:29.065082 4791 status_manager.go:851] "Failed to get status for pod" podUID="3fd8eea0-19ca-4527-9a36-2d7cd67fba45" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Feb 18 00:38:29 crc kubenswrapper[4791]: E0218 00:38:29.207315 4791 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.113:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 18 00:38:30 crc kubenswrapper[4791]: E0218 00:38:30.460292 4791 controller.go:195] "Failed to update lease" err="Put 
\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" Feb 18 00:38:30 crc kubenswrapper[4791]: E0218 00:38:30.460711 4791 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" Feb 18 00:38:30 crc kubenswrapper[4791]: E0218 00:38:30.461078 4791 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" Feb 18 00:38:30 crc kubenswrapper[4791]: E0218 00:38:30.461498 4791 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" Feb 18 00:38:30 crc kubenswrapper[4791]: E0218 00:38:30.461691 4791 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" Feb 18 00:38:30 crc kubenswrapper[4791]: I0218 00:38:30.461723 4791 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Feb 18 00:38:30 crc kubenswrapper[4791]: E0218 00:38:30.461871 4791 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" interval="200ms" Feb 18 00:38:30 crc kubenswrapper[4791]: E0218 00:38:30.662864 4791 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" interval="400ms" Feb 18 00:38:31 crc kubenswrapper[4791]: E0218 00:38:31.063944 4791 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" interval="800ms" Feb 18 00:38:31 crc kubenswrapper[4791]: E0218 00:38:31.865532 4791 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" interval="1.6s" Feb 18 00:38:33 crc kubenswrapper[4791]: E0218 00:38:33.466213 4791 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" interval="3.2s" Feb 18 00:38:35 crc kubenswrapper[4791]: E0218 00:38:35.027083 4791 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.113:6443: connect: connection refused" 
event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.18953046500b6821 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-18 00:38:26.988927009 +0000 UTC m=+248.556940189,LastTimestamp:2026-02-18 00:38:26.988927009 +0000 UTC m=+248.556940189,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 18 00:38:36 crc kubenswrapper[4791]: E0218 00:38:36.666916 4791 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.113:6443: connect: connection refused" interval="6.4s" Feb 18 00:38:37 crc kubenswrapper[4791]: I0218 00:38:37.060754 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:38:37 crc kubenswrapper[4791]: I0218 00:38:37.061504 4791 status_manager.go:851] "Failed to get status for pod" podUID="3fd8eea0-19ca-4527-9a36-2d7cd67fba45" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Feb 18 00:38:37 crc kubenswrapper[4791]: I0218 00:38:37.074196 4791 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="a468f665-a84a-4bfc-9a2c-672ad3c3cba6" Feb 18 00:38:37 crc kubenswrapper[4791]: I0218 00:38:37.074239 4791 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="a468f665-a84a-4bfc-9a2c-672ad3c3cba6" Feb 18 00:38:37 crc kubenswrapper[4791]: E0218 00:38:37.074659 4791 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:38:37 crc kubenswrapper[4791]: I0218 00:38:37.075107 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:38:37 crc kubenswrapper[4791]: W0218 00:38:37.103382 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-04b925a9e5f9ddeb9c4d927ad4119d6a959352f0b1724ef301c073b72f8f8792 WatchSource:0}: Error finding container 04b925a9e5f9ddeb9c4d927ad4119d6a959352f0b1724ef301c073b72f8f8792: Status 404 returned error can't find the container with id 04b925a9e5f9ddeb9c4d927ad4119d6a959352f0b1724ef301c073b72f8f8792 Feb 18 00:38:37 crc kubenswrapper[4791]: I0218 00:38:37.257641 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"04b925a9e5f9ddeb9c4d927ad4119d6a959352f0b1724ef301c073b72f8f8792"} Feb 18 00:38:37 crc kubenswrapper[4791]: I0218 00:38:37.260350 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Feb 18 00:38:37 crc kubenswrapper[4791]: I0218 00:38:37.260390 4791 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9" exitCode=1 Feb 18 00:38:37 crc kubenswrapper[4791]: I0218 00:38:37.260415 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9"} Feb 18 00:38:37 crc kubenswrapper[4791]: I0218 00:38:37.260756 4791 scope.go:117] "RemoveContainer" containerID="dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9" Feb 18 00:38:37 crc kubenswrapper[4791]: I0218 00:38:37.261116 4791 status_manager.go:851] "Failed to get status for pod" podUID="3fd8eea0-19ca-4527-9a36-2d7cd67fba45" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Feb 18 00:38:37 crc kubenswrapper[4791]: I0218 00:38:37.261376 4791 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Feb 18 00:38:38 crc kubenswrapper[4791]: I0218 00:38:38.269864 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Feb 18 00:38:38 crc kubenswrapper[4791]: I0218 00:38:38.270303 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"4ef9fec7155213155f4402f7a89168f26e9fcfe24861e459ba4bb7642aa0adc7"} Feb 18 00:38:38 crc kubenswrapper[4791]: I0218 00:38:38.271357 4791 status_manager.go:851] "Failed to get status for pod" podUID="3fd8eea0-19ca-4527-9a36-2d7cd67fba45" pod="openshift-kube-apiserver/installer-9-crc" 
err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Feb 18 00:38:38 crc kubenswrapper[4791]: I0218 00:38:38.271902 4791 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Feb 18 00:38:38 crc kubenswrapper[4791]: I0218 00:38:38.272976 4791 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="ad74edc8415b6e8af363b1544b441a4d854469535a14fab09ea59348129761c0" exitCode=0 Feb 18 00:38:38 crc kubenswrapper[4791]: I0218 00:38:38.273031 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"ad74edc8415b6e8af363b1544b441a4d854469535a14fab09ea59348129761c0"} Feb 18 00:38:38 crc kubenswrapper[4791]: I0218 00:38:38.273519 4791 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="a468f665-a84a-4bfc-9a2c-672ad3c3cba6" Feb 18 00:38:38 crc kubenswrapper[4791]: I0218 00:38:38.273552 4791 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="a468f665-a84a-4bfc-9a2c-672ad3c3cba6" Feb 18 00:38:38 crc kubenswrapper[4791]: E0218 00:38:38.274006 4791 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:38:38 crc kubenswrapper[4791]: I0218 00:38:38.274345 4791 status_manager.go:851] "Failed to get status for pod" podUID="3fd8eea0-19ca-4527-9a36-2d7cd67fba45" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Feb 18 00:38:38 crc kubenswrapper[4791]: I0218 00:38:38.274903 4791 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.113:6443: connect: connection refused" Feb 18 00:38:39 crc kubenswrapper[4791]: I0218 00:38:39.285474 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"123771f432cf48a216c0523856a08f13b1feac3d1164a7c806cd202fbf40fbd0"} Feb 18 00:38:39 crc kubenswrapper[4791]: I0218 00:38:39.285772 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"0cab8f95eb38fe28f9297111520b5b38d6146463da6ca62b377fa8868c460bfc"} Feb 18 00:38:39 crc kubenswrapper[4791]: I0218 00:38:39.285782 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"378d0ec23fb77237b96769a5354216071af91ad8f5417449df69327da82d49b1"} Feb 18 00:38:39 crc kubenswrapper[4791]: I0218 00:38:39.285791 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"0e7fef2ca75e6c0baa55e240a6573bb10411b410b038a121d2145c21d4a22a56"} Feb 18 00:38:40 crc kubenswrapper[4791]: I0218 00:38:40.293976 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"c86eb85524e03d66888376cdeeb6429942dab8428ff3bfc83a0604d0b1ed93a4"} Feb 18 00:38:40 crc kubenswrapper[4791]: I0218 00:38:40.294305 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:38:40 crc kubenswrapper[4791]: I0218 00:38:40.294367 4791 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="a468f665-a84a-4bfc-9a2c-672ad3c3cba6" Feb 18 00:38:40 crc kubenswrapper[4791]: I0218 00:38:40.294398 4791 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="a468f665-a84a-4bfc-9a2c-672ad3c3cba6" Feb 18 00:38:42 crc kubenswrapper[4791]: I0218 00:38:42.075779 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:38:42 crc kubenswrapper[4791]: I0218 00:38:42.076104 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:38:42 crc kubenswrapper[4791]: I0218 00:38:42.084333 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:38:42 crc kubenswrapper[4791]: I0218 00:38:42.255832 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 18 00:38:43 crc kubenswrapper[4791]: I0218 00:38:43.321871 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 18 00:38:43 crc kubenswrapper[4791]: I0218 00:38:43.322248 4791 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Feb 18 00:38:43 crc kubenswrapper[4791]: I0218 00:38:43.322325 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Feb 18 00:38:45 crc kubenswrapper[4791]: I0218 00:38:45.304387 4791 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:38:45 crc kubenswrapper[4791]: I0218 00:38:45.322509 4791 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="a468f665-a84a-4bfc-9a2c-672ad3c3cba6" Feb 18 00:38:45 crc kubenswrapper[4791]: I0218 
00:38:45.322761 4791 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="a468f665-a84a-4bfc-9a2c-672ad3c3cba6" Feb 18 00:38:45 crc kubenswrapper[4791]: I0218 00:38:45.326347 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:38:45 crc kubenswrapper[4791]: I0218 00:38:45.328326 4791 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="5bb6238d-0de1-4e32-9a3c-41e8b236fcd9" Feb 18 00:38:46 crc kubenswrapper[4791]: I0218 00:38:46.328523 4791 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="a468f665-a84a-4bfc-9a2c-672ad3c3cba6" Feb 18 00:38:46 crc kubenswrapper[4791]: I0218 00:38:46.328552 4791 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="a468f665-a84a-4bfc-9a2c-672ad3c3cba6" Feb 18 00:38:49 crc kubenswrapper[4791]: I0218 00:38:49.075406 4791 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="5bb6238d-0de1-4e32-9a3c-41e8b236fcd9" Feb 18 00:38:53 crc kubenswrapper[4791]: I0218 00:38:53.321533 4791 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Feb 18 00:38:53 crc kubenswrapper[4791]: I0218 00:38:53.322241 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Feb 18 00:38:54 crc kubenswrapper[4791]: I0218 00:38:54.788868 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Feb 18 00:38:55 crc kubenswrapper[4791]: I0218 00:38:55.579404 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Feb 18 00:38:55 crc kubenswrapper[4791]: I0218 00:38:55.755817 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Feb 18 00:38:55 crc kubenswrapper[4791]: I0218 00:38:55.886816 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Feb 18 00:38:55 crc kubenswrapper[4791]: I0218 00:38:55.985588 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Feb 18 00:38:56 crc kubenswrapper[4791]: I0218 00:38:56.722788 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Feb 18 00:38:56 crc kubenswrapper[4791]: I0218 00:38:56.774407 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Feb 18 00:38:57 crc kubenswrapper[4791]: I0218 00:38:57.090395 4791 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Feb 18 00:38:57 crc kubenswrapper[4791]: I0218 00:38:57.164004 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Feb 18 00:38:57 crc kubenswrapper[4791]: I0218 00:38:57.241567 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Feb 18 00:38:57 crc kubenswrapper[4791]: I0218 00:38:57.370671 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Feb 18 00:38:57 crc kubenswrapper[4791]: I0218 00:38:57.466416 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Feb 18 00:38:57 crc kubenswrapper[4791]: I0218 00:38:57.467218 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Feb 18 00:38:57 crc kubenswrapper[4791]: I0218 00:38:57.486770 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Feb 18 00:38:57 crc kubenswrapper[4791]: I0218 00:38:57.633880 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Feb 18 00:38:57 crc kubenswrapper[4791]: I0218 00:38:57.667151 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Feb 18 00:38:57 crc kubenswrapper[4791]: I0218 00:38:57.818242 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Feb 18 00:38:58 crc kubenswrapper[4791]: I0218 00:38:58.034112 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Feb 18 00:38:58 crc kubenswrapper[4791]: I0218 00:38:58.071507 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Feb 18 00:38:58 crc kubenswrapper[4791]: I0218 00:38:58.086574 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Feb 18 00:38:58 crc kubenswrapper[4791]: I0218 00:38:58.141437 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Feb 18 00:38:58 crc kubenswrapper[4791]: I0218 00:38:58.292570 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Feb 18 00:38:58 crc kubenswrapper[4791]: I0218 00:38:58.342904 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Feb 18 00:38:58 crc kubenswrapper[4791]: I0218 00:38:58.386331 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Feb 18 00:38:58 crc kubenswrapper[4791]: I0218 00:38:58.446533 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Feb 18 00:38:58 crc kubenswrapper[4791]: I0218 00:38:58.520361 4791 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Feb 18 00:38:58 crc kubenswrapper[4791]: I0218 00:38:58.563428 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Feb 18 00:38:58 crc kubenswrapper[4791]: I0218 00:38:58.728792 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Feb 18 00:38:58 crc kubenswrapper[4791]: I0218 00:38:58.956936 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Feb 18 00:38:59 crc kubenswrapper[4791]: I0218 00:38:59.006338 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Feb 18 00:38:59 crc kubenswrapper[4791]: I0218 00:38:59.010737 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Feb 18 00:38:59 crc kubenswrapper[4791]: I0218 00:38:59.180669 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Feb 18 00:38:59 crc kubenswrapper[4791]: I0218 00:38:59.261300 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Feb 18 00:38:59 crc kubenswrapper[4791]: I0218 00:38:59.412427 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Feb 18 00:38:59 crc kubenswrapper[4791]: I0218 00:38:59.436426 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Feb 18 00:38:59 crc kubenswrapper[4791]: I0218 00:38:59.511023 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Feb 18 00:38:59 crc kubenswrapper[4791]: I0218 00:38:59.521799 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Feb 18 00:38:59 crc kubenswrapper[4791]: I0218 00:38:59.609505 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Feb 18 00:38:59 crc kubenswrapper[4791]: I0218 00:38:59.806874 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Feb 18 00:38:59 crc kubenswrapper[4791]: I0218 00:38:59.822913 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Feb 18 00:38:59 crc kubenswrapper[4791]: I0218 00:38:59.864776 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Feb 18 00:39:00 crc kubenswrapper[4791]: I0218 00:39:00.210818 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Feb 18 00:39:00 crc kubenswrapper[4791]: I0218 00:39:00.299570 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Feb 18 00:39:00 crc kubenswrapper[4791]: I0218 00:39:00.342932 4791 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Feb 18 00:39:00 crc kubenswrapper[4791]: I0218 00:39:00.389977 4791 reflector.go:368] Caches populated for *v1.CSIDriver from 
k8s.io/client-go/informers/factory.go:160 Feb 18 00:39:00 crc kubenswrapper[4791]: I0218 00:39:00.433015 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Feb 18 00:39:00 crc kubenswrapper[4791]: I0218 00:39:00.464067 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Feb 18 00:39:00 crc kubenswrapper[4791]: I0218 00:39:00.497106 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Feb 18 00:39:00 crc kubenswrapper[4791]: I0218 00:39:00.503837 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Feb 18 00:39:00 crc kubenswrapper[4791]: I0218 00:39:00.503977 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Feb 18 00:39:00 crc kubenswrapper[4791]: I0218 00:39:00.506260 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Feb 18 00:39:00 crc kubenswrapper[4791]: I0218 00:39:00.570537 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Feb 18 00:39:00 crc kubenswrapper[4791]: I0218 00:39:00.616402 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Feb 18 00:39:00 crc kubenswrapper[4791]: I0218 00:39:00.885925 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Feb 18 00:39:00 crc kubenswrapper[4791]: I0218 00:39:00.888325 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Feb 18 00:39:00 crc kubenswrapper[4791]: I0218 00:39:00.953127 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Feb 18 00:39:00 crc kubenswrapper[4791]: I0218 00:39:00.957529 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Feb 18 00:39:00 crc kubenswrapper[4791]: I0218 00:39:00.974915 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Feb 18 00:39:00 crc kubenswrapper[4791]: I0218 00:39:00.977233 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Feb 18 00:39:01 crc kubenswrapper[4791]: I0218 00:39:01.020755 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Feb 18 00:39:01 crc kubenswrapper[4791]: I0218 00:39:01.103034 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Feb 18 00:39:01 crc kubenswrapper[4791]: I0218 00:39:01.117006 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Feb 18 00:39:01 crc kubenswrapper[4791]: I0218 00:39:01.137887 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Feb 18 00:39:01 crc kubenswrapper[4791]: I0218 00:39:01.170500 4791 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Feb 18 00:39:01 crc kubenswrapper[4791]: I0218 00:39:01.315305 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Feb 18 00:39:01 crc kubenswrapper[4791]: I0218 00:39:01.334777 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Feb 18 00:39:01 crc kubenswrapper[4791]: I0218 00:39:01.364352 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Feb 18 00:39:01 crc kubenswrapper[4791]: I0218 00:39:01.378195 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Feb 18 00:39:01 crc kubenswrapper[4791]: I0218 00:39:01.437100 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Feb 18 00:39:01 crc kubenswrapper[4791]: I0218 00:39:01.437308 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Feb 18 00:39:01 crc kubenswrapper[4791]: I0218 00:39:01.551269 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Feb 18 00:39:01 crc kubenswrapper[4791]: I0218 00:39:01.562317 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Feb 18 00:39:01 crc kubenswrapper[4791]: I0218 00:39:01.563329 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Feb 18 00:39:01 crc kubenswrapper[4791]: I0218 00:39:01.598544 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Feb 18 00:39:01 crc kubenswrapper[4791]: I0218 00:39:01.630933 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Feb 18 00:39:01 crc kubenswrapper[4791]: I0218 00:39:01.672678 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Feb 18 00:39:01 crc kubenswrapper[4791]: I0218 00:39:01.692128 4791 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Feb 18 00:39:01 crc kubenswrapper[4791]: I0218 00:39:01.697210 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Feb 18 00:39:01 crc kubenswrapper[4791]: I0218 00:39:01.697262 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Feb 18 00:39:01 crc kubenswrapper[4791]: I0218 00:39:01.701733 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 18 00:39:01 crc kubenswrapper[4791]: I0218 00:39:01.712027 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=16.71196652 podStartE2EDuration="16.71196652s" podCreationTimestamp="2026-02-18 00:38:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:39:01.711760203 +0000 UTC m=+283.279773383" watchObservedRunningTime="2026-02-18 00:39:01.71196652 +0000 UTC m=+283.279979700" Feb 18 00:39:01 crc 
kubenswrapper[4791]: I0218 00:39:01.730045 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Feb 18 00:39:01 crc kubenswrapper[4791]: I0218 00:39:01.733987 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Feb 18 00:39:01 crc kubenswrapper[4791]: I0218 00:39:01.745930 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Feb 18 00:39:01 crc kubenswrapper[4791]: I0218 00:39:01.776418 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Feb 18 00:39:01 crc kubenswrapper[4791]: I0218 00:39:01.919740 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Feb 18 00:39:02 crc kubenswrapper[4791]: I0218 00:39:02.002644 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Feb 18 00:39:02 crc kubenswrapper[4791]: I0218 00:39:02.030742 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Feb 18 00:39:02 crc kubenswrapper[4791]: I0218 00:39:02.083502 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Feb 18 00:39:02 crc kubenswrapper[4791]: I0218 00:39:02.160041 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Feb 18 00:39:02 crc kubenswrapper[4791]: I0218 00:39:02.170187 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Feb 18 00:39:02 crc kubenswrapper[4791]: I0218 00:39:02.175355 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Feb 18 00:39:02 crc kubenswrapper[4791]: I0218 00:39:02.251624 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Feb 18 00:39:02 crc kubenswrapper[4791]: I0218 00:39:02.279271 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Feb 18 00:39:02 crc kubenswrapper[4791]: I0218 00:39:02.346483 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Feb 18 00:39:02 crc kubenswrapper[4791]: I0218 00:39:02.353894 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Feb 18 00:39:02 crc kubenswrapper[4791]: I0218 00:39:02.384513 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Feb 18 00:39:02 crc kubenswrapper[4791]: I0218 00:39:02.449096 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Feb 18 00:39:02 crc kubenswrapper[4791]: I0218 00:39:02.470320 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Feb 18 00:39:02 crc kubenswrapper[4791]: I0218 00:39:02.515149 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Feb 18 00:39:02 crc kubenswrapper[4791]: I0218 00:39:02.571538 4791 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Feb 18 00:39:02 crc kubenswrapper[4791]: I0218 00:39:02.723979 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Feb 18 00:39:02 crc kubenswrapper[4791]: I0218 00:39:02.724010 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Feb 18 00:39:02 crc kubenswrapper[4791]: I0218 00:39:02.820579 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Feb 18 00:39:02 crc kubenswrapper[4791]: I0218 00:39:02.854873 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Feb 18 00:39:02 crc kubenswrapper[4791]: I0218 00:39:02.914416 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Feb 18 00:39:02 crc kubenswrapper[4791]: I0218 00:39:02.925254 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Feb 18 00:39:03 crc kubenswrapper[4791]: I0218 00:39:03.019112 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Feb 18 00:39:03 crc kubenswrapper[4791]: I0218 00:39:03.033859 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Feb 18 00:39:03 crc kubenswrapper[4791]: I0218 00:39:03.137561 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Feb 18 00:39:03 crc kubenswrapper[4791]: I0218 00:39:03.200605 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Feb 18 00:39:03 crc kubenswrapper[4791]: I0218 00:39:03.315033 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Feb 18 00:39:03 crc kubenswrapper[4791]: I0218 00:39:03.322260 4791 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Feb 18 00:39:03 crc kubenswrapper[4791]: I0218 00:39:03.322324 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Feb 18 00:39:03 crc kubenswrapper[4791]: I0218 00:39:03.322495 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 18 00:39:03 crc kubenswrapper[4791]: I0218 00:39:03.323441 4791 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="kube-controller-manager" containerStatusID={"Type":"cri-o","ID":"4ef9fec7155213155f4402f7a89168f26e9fcfe24861e459ba4bb7642aa0adc7"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container kube-controller-manager failed startup probe, will be 
restarted" Feb 18 00:39:03 crc kubenswrapper[4791]: I0218 00:39:03.323832 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" containerID="cri-o://4ef9fec7155213155f4402f7a89168f26e9fcfe24861e459ba4bb7642aa0adc7" gracePeriod=30 Feb 18 00:39:03 crc kubenswrapper[4791]: I0218 00:39:03.332009 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Feb 18 00:39:03 crc kubenswrapper[4791]: I0218 00:39:03.350714 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Feb 18 00:39:03 crc kubenswrapper[4791]: I0218 00:39:03.403114 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Feb 18 00:39:03 crc kubenswrapper[4791]: I0218 00:39:03.561506 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Feb 18 00:39:03 crc kubenswrapper[4791]: I0218 00:39:03.577376 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Feb 18 00:39:03 crc kubenswrapper[4791]: I0218 00:39:03.622101 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Feb 18 00:39:03 crc kubenswrapper[4791]: I0218 00:39:03.703666 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Feb 18 00:39:03 crc kubenswrapper[4791]: I0218 00:39:03.784500 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Feb 18 00:39:03 crc kubenswrapper[4791]: I0218 00:39:03.799963 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Feb 18 00:39:03 crc kubenswrapper[4791]: I0218 00:39:03.810270 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Feb 18 00:39:03 crc kubenswrapper[4791]: I0218 00:39:03.817283 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Feb 18 00:39:03 crc kubenswrapper[4791]: I0218 00:39:03.857780 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Feb 18 00:39:03 crc kubenswrapper[4791]: I0218 00:39:03.886791 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Feb 18 00:39:03 crc kubenswrapper[4791]: I0218 00:39:03.907749 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Feb 18 00:39:03 crc kubenswrapper[4791]: I0218 00:39:03.910587 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Feb 18 00:39:03 crc kubenswrapper[4791]: I0218 00:39:03.974927 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Feb 18 00:39:03 crc kubenswrapper[4791]: I0218 00:39:03.992755 4791 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Feb 18 00:39:04 crc kubenswrapper[4791]: I0218 00:39:04.073931 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Feb 18 00:39:04 crc kubenswrapper[4791]: I0218 00:39:04.101774 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Feb 18 00:39:04 crc kubenswrapper[4791]: I0218 00:39:04.122044 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Feb 18 00:39:04 crc kubenswrapper[4791]: I0218 00:39:04.189581 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Feb 18 00:39:04 crc kubenswrapper[4791]: I0218 00:39:04.196945 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Feb 18 00:39:04 crc kubenswrapper[4791]: I0218 00:39:04.230502 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Feb 18 00:39:04 crc kubenswrapper[4791]: I0218 00:39:04.360715 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Feb 18 00:39:04 crc kubenswrapper[4791]: I0218 00:39:04.360732 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Feb 18 00:39:04 crc kubenswrapper[4791]: I0218 00:39:04.366691 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Feb 18 00:39:04 crc kubenswrapper[4791]: I0218 00:39:04.395174 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Feb 18 00:39:04 crc kubenswrapper[4791]: I0218 00:39:04.497506 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Feb 18 00:39:04 crc kubenswrapper[4791]: I0218 00:39:04.523286 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Feb 18 00:39:04 crc kubenswrapper[4791]: I0218 00:39:04.570489 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Feb 18 00:39:04 crc kubenswrapper[4791]: I0218 00:39:04.596692 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Feb 18 00:39:04 crc kubenswrapper[4791]: I0218 00:39:04.601861 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Feb 18 00:39:04 crc kubenswrapper[4791]: I0218 00:39:04.713285 4791 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Feb 18 00:39:04 crc kubenswrapper[4791]: I0218 00:39:04.747415 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Feb 18 00:39:04 crc kubenswrapper[4791]: I0218 00:39:04.759013 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Feb 18 00:39:04 crc kubenswrapper[4791]: I0218 00:39:04.804366 4791 reflector.go:368] Caches populated for *v1.Secret 
from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Feb 18 00:39:04 crc kubenswrapper[4791]: I0218 00:39:04.823786 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Feb 18 00:39:04 crc kubenswrapper[4791]: I0218 00:39:04.885371 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Feb 18 00:39:04 crc kubenswrapper[4791]: I0218 00:39:04.895352 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Feb 18 00:39:04 crc kubenswrapper[4791]: I0218 00:39:04.961264 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Feb 18 00:39:04 crc kubenswrapper[4791]: I0218 00:39:04.990530 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Feb 18 00:39:05 crc kubenswrapper[4791]: I0218 00:39:05.018428 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Feb 18 00:39:05 crc kubenswrapper[4791]: I0218 00:39:05.060950 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Feb 18 00:39:05 crc kubenswrapper[4791]: I0218 00:39:05.067783 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Feb 18 00:39:05 crc kubenswrapper[4791]: I0218 00:39:05.513190 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Feb 18 00:39:05 crc kubenswrapper[4791]: I0218 00:39:05.514568 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Feb 18 00:39:05 crc kubenswrapper[4791]: I0218 00:39:05.521468 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Feb 18 00:39:05 crc kubenswrapper[4791]: I0218 00:39:05.522839 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Feb 18 00:39:05 crc kubenswrapper[4791]: I0218 00:39:05.610827 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Feb 18 00:39:05 crc kubenswrapper[4791]: I0218 00:39:05.636062 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Feb 18 00:39:05 crc kubenswrapper[4791]: I0218 00:39:05.795368 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Feb 18 00:39:05 crc kubenswrapper[4791]: I0218 00:39:05.850793 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Feb 18 00:39:05 crc kubenswrapper[4791]: I0218 00:39:05.878379 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Feb 18 00:39:05 crc kubenswrapper[4791]: I0218 00:39:05.895899 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Feb 18 00:39:05 crc kubenswrapper[4791]: I0218 00:39:05.946079 
4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Feb 18 00:39:05 crc kubenswrapper[4791]: I0218 00:39:05.960439 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Feb 18 00:39:06 crc kubenswrapper[4791]: I0218 00:39:06.104658 4791 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Feb 18 00:39:06 crc kubenswrapper[4791]: I0218 00:39:06.140427 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Feb 18 00:39:06 crc kubenswrapper[4791]: I0218 00:39:06.192310 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Feb 18 00:39:06 crc kubenswrapper[4791]: I0218 00:39:06.201133 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Feb 18 00:39:06 crc kubenswrapper[4791]: I0218 00:39:06.253638 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Feb 18 00:39:06 crc kubenswrapper[4791]: I0218 00:39:06.313764 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Feb 18 00:39:06 crc kubenswrapper[4791]: I0218 00:39:06.332610 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Feb 18 00:39:06 crc kubenswrapper[4791]: I0218 00:39:06.357898 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Feb 18 00:39:06 crc kubenswrapper[4791]: I0218 00:39:06.397789 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Feb 18 00:39:06 crc kubenswrapper[4791]: I0218 00:39:06.456479 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Feb 18 00:39:06 crc kubenswrapper[4791]: I0218 00:39:06.479640 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Feb 18 00:39:06 crc kubenswrapper[4791]: I0218 00:39:06.555675 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Feb 18 00:39:06 crc kubenswrapper[4791]: I0218 00:39:06.591191 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Feb 18 00:39:06 crc kubenswrapper[4791]: I0218 00:39:06.601260 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Feb 18 00:39:06 crc kubenswrapper[4791]: I0218 00:39:06.621738 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Feb 18 00:39:06 crc kubenswrapper[4791]: I0218 00:39:06.621961 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Feb 18 00:39:06 crc kubenswrapper[4791]: I0218 00:39:06.627959 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Feb 18 00:39:06 crc kubenswrapper[4791]: I0218 00:39:06.656206 4791 reflector.go:368] Caches populated for *v1.Secret 
from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Feb 18 00:39:06 crc kubenswrapper[4791]: I0218 00:39:06.663435 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Feb 18 00:39:06 crc kubenswrapper[4791]: I0218 00:39:06.796784 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Feb 18 00:39:06 crc kubenswrapper[4791]: I0218 00:39:06.840136 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Feb 18 00:39:06 crc kubenswrapper[4791]: I0218 00:39:06.944078 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Feb 18 00:39:06 crc kubenswrapper[4791]: I0218 00:39:06.950485 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Feb 18 00:39:07 crc kubenswrapper[4791]: I0218 00:39:07.002441 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Feb 18 00:39:07 crc kubenswrapper[4791]: I0218 00:39:07.094855 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Feb 18 00:39:07 crc kubenswrapper[4791]: I0218 00:39:07.147828 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Feb 18 00:39:07 crc kubenswrapper[4791]: I0218 00:39:07.164861 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Feb 18 00:39:07 crc kubenswrapper[4791]: I0218 00:39:07.259912 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Feb 18 00:39:07 crc kubenswrapper[4791]: I0218 00:39:07.280638 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Feb 18 00:39:07 crc kubenswrapper[4791]: I0218 00:39:07.283950 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Feb 18 00:39:07 crc kubenswrapper[4791]: I0218 00:39:07.331263 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Feb 18 00:39:07 crc kubenswrapper[4791]: I0218 00:39:07.337355 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Feb 18 00:39:07 crc kubenswrapper[4791]: I0218 00:39:07.342089 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Feb 18 00:39:07 crc kubenswrapper[4791]: I0218 00:39:07.432680 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Feb 18 00:39:07 crc kubenswrapper[4791]: I0218 00:39:07.462604 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Feb 18 00:39:07 crc kubenswrapper[4791]: I0218 00:39:07.487390 4791 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Feb 18 00:39:07 crc kubenswrapper[4791]: I0218 00:39:07.673833 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Feb 18 00:39:07 crc kubenswrapper[4791]: I0218 00:39:07.772322 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Feb 18 00:39:07 crc kubenswrapper[4791]: I0218 00:39:07.778473 4791 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Feb 18 00:39:07 crc kubenswrapper[4791]: I0218 00:39:07.778700 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://a59fb2f157bfea4975b46639f7f5235a33771e95ef49d35496b24dd8897b86ba" gracePeriod=5 Feb 18 00:39:07 crc kubenswrapper[4791]: I0218 00:39:07.846101 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Feb 18 00:39:07 crc kubenswrapper[4791]: I0218 00:39:07.978713 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Feb 18 00:39:08 crc kubenswrapper[4791]: I0218 00:39:08.038326 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Feb 18 00:39:08 crc kubenswrapper[4791]: I0218 00:39:08.052768 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Feb 18 00:39:08 crc kubenswrapper[4791]: I0218 00:39:08.303646 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Feb 18 00:39:08 crc kubenswrapper[4791]: I0218 00:39:08.362276 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Feb 18 00:39:08 crc kubenswrapper[4791]: I0218 00:39:08.447398 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Feb 18 00:39:08 crc kubenswrapper[4791]: I0218 00:39:08.597226 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Feb 18 00:39:08 crc kubenswrapper[4791]: I0218 00:39:08.705375 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Feb 18 00:39:08 crc kubenswrapper[4791]: I0218 00:39:08.706350 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Feb 18 00:39:08 crc kubenswrapper[4791]: I0218 00:39:08.750066 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Feb 18 00:39:08 crc kubenswrapper[4791]: I0218 00:39:08.829065 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Feb 18 00:39:08 crc kubenswrapper[4791]: I0218 00:39:08.830222 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Feb 18 00:39:08 crc kubenswrapper[4791]: I0218 
00:39:08.926564 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Feb 18 00:39:08 crc kubenswrapper[4791]: I0218 00:39:08.936753 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Feb 18 00:39:09 crc kubenswrapper[4791]: I0218 00:39:09.106877 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Feb 18 00:39:09 crc kubenswrapper[4791]: I0218 00:39:09.172330 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Feb 18 00:39:09 crc kubenswrapper[4791]: I0218 00:39:09.199500 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Feb 18 00:39:09 crc kubenswrapper[4791]: I0218 00:39:09.247021 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Feb 18 00:39:09 crc kubenswrapper[4791]: I0218 00:39:09.290040 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Feb 18 00:39:09 crc kubenswrapper[4791]: I0218 00:39:09.502385 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Feb 18 00:39:09 crc kubenswrapper[4791]: I0218 00:39:09.502511 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Feb 18 00:39:09 crc kubenswrapper[4791]: I0218 00:39:09.529209 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Feb 18 00:39:09 crc kubenswrapper[4791]: I0218 00:39:09.747422 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Feb 18 00:39:09 crc kubenswrapper[4791]: I0218 00:39:09.803237 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Feb 18 00:39:09 crc kubenswrapper[4791]: I0218 00:39:09.947582 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Feb 18 00:39:09 crc kubenswrapper[4791]: I0218 00:39:09.960916 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Feb 18 00:39:09 crc kubenswrapper[4791]: I0218 00:39:09.967359 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Feb 18 00:39:10 crc kubenswrapper[4791]: I0218 00:39:10.015228 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Feb 18 00:39:10 crc kubenswrapper[4791]: I0218 00:39:10.357995 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Feb 18 00:39:10 crc kubenswrapper[4791]: I0218 00:39:10.406047 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Feb 18 00:39:10 crc kubenswrapper[4791]: I0218 00:39:10.539346 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Feb 18 00:39:10 crc kubenswrapper[4791]: I0218 00:39:10.657589 4791 reflector.go:368] 
Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Feb 18 00:39:10 crc kubenswrapper[4791]: I0218 00:39:10.750207 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Feb 18 00:39:12 crc kubenswrapper[4791]: I0218 00:39:12.907151 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Feb 18 00:39:12 crc kubenswrapper[4791]: I0218 00:39:12.907467 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 18 00:39:13 crc kubenswrapper[4791]: I0218 00:39:13.049445 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Feb 18 00:39:13 crc kubenswrapper[4791]: I0218 00:39:13.049566 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Feb 18 00:39:13 crc kubenswrapper[4791]: I0218 00:39:13.049617 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:39:13 crc kubenswrapper[4791]: I0218 00:39:13.049629 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Feb 18 00:39:13 crc kubenswrapper[4791]: I0218 00:39:13.049681 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:39:13 crc kubenswrapper[4791]: I0218 00:39:13.049743 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Feb 18 00:39:13 crc kubenswrapper[4791]: I0218 00:39:13.049756 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:39:13 crc kubenswrapper[4791]: I0218 00:39:13.049808 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Feb 18 00:39:13 crc kubenswrapper[4791]: I0218 00:39:13.050139 4791 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Feb 18 00:39:13 crc kubenswrapper[4791]: I0218 00:39:13.050198 4791 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Feb 18 00:39:13 crc kubenswrapper[4791]: I0218 00:39:13.050215 4791 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Feb 18 00:39:13 crc kubenswrapper[4791]: I0218 00:39:13.050257 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:39:13 crc kubenswrapper[4791]: I0218 00:39:13.057694 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:39:13 crc kubenswrapper[4791]: I0218 00:39:13.069442 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Feb 18 00:39:13 crc kubenswrapper[4791]: I0218 00:39:13.151749 4791 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Feb 18 00:39:13 crc kubenswrapper[4791]: I0218 00:39:13.151792 4791 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Feb 18 00:39:13 crc kubenswrapper[4791]: I0218 00:39:13.479326 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Feb 18 00:39:13 crc kubenswrapper[4791]: I0218 00:39:13.479406 4791 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="a59fb2f157bfea4975b46639f7f5235a33771e95ef49d35496b24dd8897b86ba" exitCode=137 Feb 18 00:39:13 crc kubenswrapper[4791]: I0218 00:39:13.479461 4791 scope.go:117] "RemoveContainer" containerID="a59fb2f157bfea4975b46639f7f5235a33771e95ef49d35496b24dd8897b86ba" Feb 18 00:39:13 crc kubenswrapper[4791]: I0218 00:39:13.479505 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 18 00:39:13 crc kubenswrapper[4791]: I0218 00:39:13.500983 4791 scope.go:117] "RemoveContainer" containerID="a59fb2f157bfea4975b46639f7f5235a33771e95ef49d35496b24dd8897b86ba" Feb 18 00:39:13 crc kubenswrapper[4791]: E0218 00:39:13.501648 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a59fb2f157bfea4975b46639f7f5235a33771e95ef49d35496b24dd8897b86ba\": container with ID starting with a59fb2f157bfea4975b46639f7f5235a33771e95ef49d35496b24dd8897b86ba not found: ID does not exist" containerID="a59fb2f157bfea4975b46639f7f5235a33771e95ef49d35496b24dd8897b86ba" Feb 18 00:39:13 crc kubenswrapper[4791]: I0218 00:39:13.501736 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a59fb2f157bfea4975b46639f7f5235a33771e95ef49d35496b24dd8897b86ba"} err="failed to get container status \"a59fb2f157bfea4975b46639f7f5235a33771e95ef49d35496b24dd8897b86ba\": rpc error: code = NotFound desc = could not find container \"a59fb2f157bfea4975b46639f7f5235a33771e95ef49d35496b24dd8897b86ba\": container with ID starting with a59fb2f157bfea4975b46639f7f5235a33771e95ef49d35496b24dd8897b86ba not found: ID does not exist" Feb 18 00:39:18 crc kubenswrapper[4791]: I0218 00:39:18.876832 4791 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials Feb 18 00:39:19 crc kubenswrapper[4791]: I0218 00:39:19.797383 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Feb 18 00:39:25 crc kubenswrapper[4791]: I0218 00:39:25.044664 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Feb 18 00:39:27 crc kubenswrapper[4791]: 
I0218 00:39:27.473291 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Feb 18 00:39:27 crc kubenswrapper[4791]: I0218 00:39:27.562229 4791 generic.go:334] "Generic (PLEG): container finished" podID="d06d8fca-e20d-4b6b-9466-8c612ec8cc5f" containerID="c70f4e307190f827761f4cbe731248232328184bdd05f5bd8303bec180f114e3" exitCode=0 Feb 18 00:39:27 crc kubenswrapper[4791]: I0218 00:39:27.562298 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-plj59" event={"ID":"d06d8fca-e20d-4b6b-9466-8c612ec8cc5f","Type":"ContainerDied","Data":"c70f4e307190f827761f4cbe731248232328184bdd05f5bd8303bec180f114e3"} Feb 18 00:39:27 crc kubenswrapper[4791]: I0218 00:39:27.562806 4791 scope.go:117] "RemoveContainer" containerID="c70f4e307190f827761f4cbe731248232328184bdd05f5bd8303bec180f114e3" Feb 18 00:39:28 crc kubenswrapper[4791]: I0218 00:39:28.569250 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-plj59" event={"ID":"d06d8fca-e20d-4b6b-9466-8c612ec8cc5f","Type":"ContainerStarted","Data":"9c6aac33fb841e399d9a20d248df9763fb743255e256273f2ff40099d2ea1bb0"} Feb 18 00:39:28 crc kubenswrapper[4791]: I0218 00:39:28.569859 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-plj59" Feb 18 00:39:28 crc kubenswrapper[4791]: I0218 00:39:28.570834 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-plj59" Feb 18 00:39:29 crc kubenswrapper[4791]: I0218 00:39:29.812310 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Feb 18 00:39:30 crc kubenswrapper[4791]: I0218 00:39:30.576047 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Feb 18 00:39:30 crc kubenswrapper[4791]: I0218 00:39:30.885968 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Feb 18 00:39:33 crc kubenswrapper[4791]: I0218 00:39:33.548401 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Feb 18 00:39:33 crc kubenswrapper[4791]: I0218 00:39:33.595746 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Feb 18 00:39:33 crc kubenswrapper[4791]: I0218 00:39:33.599344 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Feb 18 00:39:33 crc kubenswrapper[4791]: I0218 00:39:33.599417 4791 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="4ef9fec7155213155f4402f7a89168f26e9fcfe24861e459ba4bb7642aa0adc7" exitCode=137 Feb 18 00:39:33 crc kubenswrapper[4791]: I0218 00:39:33.599486 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"4ef9fec7155213155f4402f7a89168f26e9fcfe24861e459ba4bb7642aa0adc7"} Feb 18 00:39:33 crc kubenswrapper[4791]: I0218 00:39:33.599599 4791 scope.go:117] 
"RemoveContainer" containerID="dcea2fa10b95dd901e1ec55e4440f71ad36a561f698215d4fdc5126e49df0ed9" Feb 18 00:39:33 crc kubenswrapper[4791]: I0218 00:39:33.733941 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Feb 18 00:39:34 crc kubenswrapper[4791]: I0218 00:39:34.609023 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Feb 18 00:39:34 crc kubenswrapper[4791]: I0218 00:39:34.610253 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"de5f6af75df420a827303be8f4bbafbc4583701a4ebdae57cdfe14f4a74104cf"} Feb 18 00:39:35 crc kubenswrapper[4791]: I0218 00:39:35.155432 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Feb 18 00:39:37 crc kubenswrapper[4791]: I0218 00:39:37.416751 4791 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Feb 18 00:39:38 crc kubenswrapper[4791]: I0218 00:39:38.998304 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Feb 18 00:39:39 crc kubenswrapper[4791]: I0218 00:39:39.300269 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Feb 18 00:39:39 crc kubenswrapper[4791]: I0218 00:39:39.986804 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Feb 18 00:39:41 crc kubenswrapper[4791]: I0218 00:39:41.404872 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Feb 18 00:39:41 crc kubenswrapper[4791]: I0218 00:39:41.413948 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Feb 18 00:39:42 crc kubenswrapper[4791]: I0218 00:39:42.256443 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 18 00:39:43 crc kubenswrapper[4791]: I0218 00:39:43.322103 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 18 00:39:43 crc kubenswrapper[4791]: I0218 00:39:43.328304 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 18 00:39:43 crc kubenswrapper[4791]: I0218 00:39:43.671078 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 18 00:39:44 crc kubenswrapper[4791]: I0218 00:39:44.280772 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Feb 18 00:39:58 crc kubenswrapper[4791]: I0218 00:39:58.650424 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-w8nfn"] Feb 18 00:39:58 crc kubenswrapper[4791]: I0218 00:39:58.651281 4791 kuberuntime_container.go:808] "Killing container with 
a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-w8nfn" podUID="da348cdf-f9c8-4e7c-b462-7d4979dada11" containerName="controller-manager" containerID="cri-o://29419d6772a182a34015dcbab5838bc82843f52d5fd7f483eb3941c4d24999db" gracePeriod=30 Feb 18 00:39:58 crc kubenswrapper[4791]: I0218 00:39:58.659805 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-2m72p"] Feb 18 00:39:58 crc kubenswrapper[4791]: I0218 00:39:58.660107 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2m72p" podUID="c1261ad9-b566-419e-ad9a-e7b361edd24a" containerName="route-controller-manager" containerID="cri-o://c89582836db97f7126a9e93e286edad8c17dd36f708c5681bd218f4e6a9239c9" gracePeriod=30 Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.130227 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-w8nfn" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.139308 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2m72p" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.176051 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c1261ad9-b566-419e-ad9a-e7b361edd24a-client-ca\") pod \"c1261ad9-b566-419e-ad9a-e7b361edd24a\" (UID: \"c1261ad9-b566-419e-ad9a-e7b361edd24a\") " Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.176100 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/da348cdf-f9c8-4e7c-b462-7d4979dada11-serving-cert\") pod \"da348cdf-f9c8-4e7c-b462-7d4979dada11\" (UID: \"da348cdf-f9c8-4e7c-b462-7d4979dada11\") " Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.176171 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/da348cdf-f9c8-4e7c-b462-7d4979dada11-proxy-ca-bundles\") pod \"da348cdf-f9c8-4e7c-b462-7d4979dada11\" (UID: \"da348cdf-f9c8-4e7c-b462-7d4979dada11\") " Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.176199 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kl5sf\" (UniqueName: \"kubernetes.io/projected/da348cdf-f9c8-4e7c-b462-7d4979dada11-kube-api-access-kl5sf\") pod \"da348cdf-f9c8-4e7c-b462-7d4979dada11\" (UID: \"da348cdf-f9c8-4e7c-b462-7d4979dada11\") " Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.176216 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c1261ad9-b566-419e-ad9a-e7b361edd24a-serving-cert\") pod \"c1261ad9-b566-419e-ad9a-e7b361edd24a\" (UID: \"c1261ad9-b566-419e-ad9a-e7b361edd24a\") " Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.176239 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tqvq\" (UniqueName: \"kubernetes.io/projected/c1261ad9-b566-419e-ad9a-e7b361edd24a-kube-api-access-8tqvq\") pod \"c1261ad9-b566-419e-ad9a-e7b361edd24a\" (UID: \"c1261ad9-b566-419e-ad9a-e7b361edd24a\") " Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.176259 4791 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c1261ad9-b566-419e-ad9a-e7b361edd24a-config\") pod \"c1261ad9-b566-419e-ad9a-e7b361edd24a\" (UID: \"c1261ad9-b566-419e-ad9a-e7b361edd24a\") " Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.176285 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/da348cdf-f9c8-4e7c-b462-7d4979dada11-client-ca\") pod \"da348cdf-f9c8-4e7c-b462-7d4979dada11\" (UID: \"da348cdf-f9c8-4e7c-b462-7d4979dada11\") " Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.176306 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da348cdf-f9c8-4e7c-b462-7d4979dada11-config\") pod \"da348cdf-f9c8-4e7c-b462-7d4979dada11\" (UID: \"da348cdf-f9c8-4e7c-b462-7d4979dada11\") " Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.177287 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c1261ad9-b566-419e-ad9a-e7b361edd24a-config" (OuterVolumeSpecName: "config") pod "c1261ad9-b566-419e-ad9a-e7b361edd24a" (UID: "c1261ad9-b566-419e-ad9a-e7b361edd24a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.177541 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c1261ad9-b566-419e-ad9a-e7b361edd24a-client-ca" (OuterVolumeSpecName: "client-ca") pod "c1261ad9-b566-419e-ad9a-e7b361edd24a" (UID: "c1261ad9-b566-419e-ad9a-e7b361edd24a"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.177593 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/da348cdf-f9c8-4e7c-b462-7d4979dada11-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "da348cdf-f9c8-4e7c-b462-7d4979dada11" (UID: "da348cdf-f9c8-4e7c-b462-7d4979dada11"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.177614 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/da348cdf-f9c8-4e7c-b462-7d4979dada11-config" (OuterVolumeSpecName: "config") pod "da348cdf-f9c8-4e7c-b462-7d4979dada11" (UID: "da348cdf-f9c8-4e7c-b462-7d4979dada11"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.177935 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/da348cdf-f9c8-4e7c-b462-7d4979dada11-client-ca" (OuterVolumeSpecName: "client-ca") pod "da348cdf-f9c8-4e7c-b462-7d4979dada11" (UID: "da348cdf-f9c8-4e7c-b462-7d4979dada11"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.181434 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1261ad9-b566-419e-ad9a-e7b361edd24a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "c1261ad9-b566-419e-ad9a-e7b361edd24a" (UID: "c1261ad9-b566-419e-ad9a-e7b361edd24a"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.181657 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da348cdf-f9c8-4e7c-b462-7d4979dada11-kube-api-access-kl5sf" (OuterVolumeSpecName: "kube-api-access-kl5sf") pod "da348cdf-f9c8-4e7c-b462-7d4979dada11" (UID: "da348cdf-f9c8-4e7c-b462-7d4979dada11"). InnerVolumeSpecName "kube-api-access-kl5sf". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.181708 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da348cdf-f9c8-4e7c-b462-7d4979dada11-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "da348cdf-f9c8-4e7c-b462-7d4979dada11" (UID: "da348cdf-f9c8-4e7c-b462-7d4979dada11"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.181768 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1261ad9-b566-419e-ad9a-e7b361edd24a-kube-api-access-8tqvq" (OuterVolumeSpecName: "kube-api-access-8tqvq") pod "c1261ad9-b566-419e-ad9a-e7b361edd24a" (UID: "c1261ad9-b566-419e-ad9a-e7b361edd24a"). InnerVolumeSpecName "kube-api-access-8tqvq". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.278213 4791 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/da348cdf-f9c8-4e7c-b462-7d4979dada11-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.278249 4791 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/da348cdf-f9c8-4e7c-b462-7d4979dada11-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.278265 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kl5sf\" (UniqueName: \"kubernetes.io/projected/da348cdf-f9c8-4e7c-b462-7d4979dada11-kube-api-access-kl5sf\") on node \"crc\" DevicePath \"\"" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.278280 4791 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c1261ad9-b566-419e-ad9a-e7b361edd24a-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.278297 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tqvq\" (UniqueName: \"kubernetes.io/projected/c1261ad9-b566-419e-ad9a-e7b361edd24a-kube-api-access-8tqvq\") on node \"crc\" DevicePath \"\"" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.278308 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c1261ad9-b566-419e-ad9a-e7b361edd24a-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.278321 4791 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/da348cdf-f9c8-4e7c-b462-7d4979dada11-client-ca\") on node \"crc\" DevicePath \"\"" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.278328 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da348cdf-f9c8-4e7c-b462-7d4979dada11-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:39:59 crc 
kubenswrapper[4791]: I0218 00:39:59.278336 4791 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c1261ad9-b566-419e-ad9a-e7b361edd24a-client-ca\") on node \"crc\" DevicePath \"\"" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.366666 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-5b998fd5d9-vg6m6"] Feb 18 00:39:59 crc kubenswrapper[4791]: E0218 00:39:59.366911 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da348cdf-f9c8-4e7c-b462-7d4979dada11" containerName="controller-manager" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.366933 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="da348cdf-f9c8-4e7c-b462-7d4979dada11" containerName="controller-manager" Feb 18 00:39:59 crc kubenswrapper[4791]: E0218 00:39:59.366954 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1261ad9-b566-419e-ad9a-e7b361edd24a" containerName="route-controller-manager" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.366963 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1261ad9-b566-419e-ad9a-e7b361edd24a" containerName="route-controller-manager" Feb 18 00:39:59 crc kubenswrapper[4791]: E0218 00:39:59.366984 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.366993 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Feb 18 00:39:59 crc kubenswrapper[4791]: E0218 00:39:59.367004 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3fd8eea0-19ca-4527-9a36-2d7cd67fba45" containerName="installer" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.367013 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="3fd8eea0-19ca-4527-9a36-2d7cd67fba45" containerName="installer" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.367150 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="da348cdf-f9c8-4e7c-b462-7d4979dada11" containerName="controller-manager" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.367190 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="3fd8eea0-19ca-4527-9a36-2d7cd67fba45" containerName="installer" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.367201 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1261ad9-b566-419e-ad9a-e7b361edd24a" containerName="route-controller-manager" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.367212 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.367629 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5b998fd5d9-vg6m6" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.375210 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5b998fd5d9-vg6m6"] Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.378662 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd8745b3-510d-40df-8c5c-449d5b7b4a99-config\") pod \"controller-manager-5b998fd5d9-vg6m6\" (UID: \"dd8745b3-510d-40df-8c5c-449d5b7b4a99\") " pod="openshift-controller-manager/controller-manager-5b998fd5d9-vg6m6" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.378719 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dd8745b3-510d-40df-8c5c-449d5b7b4a99-serving-cert\") pod \"controller-manager-5b998fd5d9-vg6m6\" (UID: \"dd8745b3-510d-40df-8c5c-449d5b7b4a99\") " pod="openshift-controller-manager/controller-manager-5b998fd5d9-vg6m6" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.378744 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hvh9b\" (UniqueName: \"kubernetes.io/projected/dd8745b3-510d-40df-8c5c-449d5b7b4a99-kube-api-access-hvh9b\") pod \"controller-manager-5b998fd5d9-vg6m6\" (UID: \"dd8745b3-510d-40df-8c5c-449d5b7b4a99\") " pod="openshift-controller-manager/controller-manager-5b998fd5d9-vg6m6" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.378761 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/dd8745b3-510d-40df-8c5c-449d5b7b4a99-proxy-ca-bundles\") pod \"controller-manager-5b998fd5d9-vg6m6\" (UID: \"dd8745b3-510d-40df-8c5c-449d5b7b4a99\") " pod="openshift-controller-manager/controller-manager-5b998fd5d9-vg6m6" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.378790 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/dd8745b3-510d-40df-8c5c-449d5b7b4a99-client-ca\") pod \"controller-manager-5b998fd5d9-vg6m6\" (UID: \"dd8745b3-510d-40df-8c5c-449d5b7b4a99\") " pod="openshift-controller-manager/controller-manager-5b998fd5d9-vg6m6" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.479910 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd8745b3-510d-40df-8c5c-449d5b7b4a99-config\") pod \"controller-manager-5b998fd5d9-vg6m6\" (UID: \"dd8745b3-510d-40df-8c5c-449d5b7b4a99\") " pod="openshift-controller-manager/controller-manager-5b998fd5d9-vg6m6" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.479994 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dd8745b3-510d-40df-8c5c-449d5b7b4a99-serving-cert\") pod \"controller-manager-5b998fd5d9-vg6m6\" (UID: \"dd8745b3-510d-40df-8c5c-449d5b7b4a99\") " pod="openshift-controller-manager/controller-manager-5b998fd5d9-vg6m6" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.480144 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hvh9b\" (UniqueName: 
\"kubernetes.io/projected/dd8745b3-510d-40df-8c5c-449d5b7b4a99-kube-api-access-hvh9b\") pod \"controller-manager-5b998fd5d9-vg6m6\" (UID: \"dd8745b3-510d-40df-8c5c-449d5b7b4a99\") " pod="openshift-controller-manager/controller-manager-5b998fd5d9-vg6m6" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.480217 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/dd8745b3-510d-40df-8c5c-449d5b7b4a99-proxy-ca-bundles\") pod \"controller-manager-5b998fd5d9-vg6m6\" (UID: \"dd8745b3-510d-40df-8c5c-449d5b7b4a99\") " pod="openshift-controller-manager/controller-manager-5b998fd5d9-vg6m6" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.480293 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/dd8745b3-510d-40df-8c5c-449d5b7b4a99-client-ca\") pod \"controller-manager-5b998fd5d9-vg6m6\" (UID: \"dd8745b3-510d-40df-8c5c-449d5b7b4a99\") " pod="openshift-controller-manager/controller-manager-5b998fd5d9-vg6m6" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.481297 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/dd8745b3-510d-40df-8c5c-449d5b7b4a99-client-ca\") pod \"controller-manager-5b998fd5d9-vg6m6\" (UID: \"dd8745b3-510d-40df-8c5c-449d5b7b4a99\") " pod="openshift-controller-manager/controller-manager-5b998fd5d9-vg6m6" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.481519 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/dd8745b3-510d-40df-8c5c-449d5b7b4a99-proxy-ca-bundles\") pod \"controller-manager-5b998fd5d9-vg6m6\" (UID: \"dd8745b3-510d-40df-8c5c-449d5b7b4a99\") " pod="openshift-controller-manager/controller-manager-5b998fd5d9-vg6m6" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.481666 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd8745b3-510d-40df-8c5c-449d5b7b4a99-config\") pod \"controller-manager-5b998fd5d9-vg6m6\" (UID: \"dd8745b3-510d-40df-8c5c-449d5b7b4a99\") " pod="openshift-controller-manager/controller-manager-5b998fd5d9-vg6m6" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.483350 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dd8745b3-510d-40df-8c5c-449d5b7b4a99-serving-cert\") pod \"controller-manager-5b998fd5d9-vg6m6\" (UID: \"dd8745b3-510d-40df-8c5c-449d5b7b4a99\") " pod="openshift-controller-manager/controller-manager-5b998fd5d9-vg6m6" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.506031 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hvh9b\" (UniqueName: \"kubernetes.io/projected/dd8745b3-510d-40df-8c5c-449d5b7b4a99-kube-api-access-hvh9b\") pod \"controller-manager-5b998fd5d9-vg6m6\" (UID: \"dd8745b3-510d-40df-8c5c-449d5b7b4a99\") " pod="openshift-controller-manager/controller-manager-5b998fd5d9-vg6m6" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.681299 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5b998fd5d9-vg6m6" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.752638 4791 generic.go:334] "Generic (PLEG): container finished" podID="c1261ad9-b566-419e-ad9a-e7b361edd24a" containerID="c89582836db97f7126a9e93e286edad8c17dd36f708c5681bd218f4e6a9239c9" exitCode=0 Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.752686 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2m72p" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.752682 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2m72p" event={"ID":"c1261ad9-b566-419e-ad9a-e7b361edd24a","Type":"ContainerDied","Data":"c89582836db97f7126a9e93e286edad8c17dd36f708c5681bd218f4e6a9239c9"} Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.752785 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2m72p" event={"ID":"c1261ad9-b566-419e-ad9a-e7b361edd24a","Type":"ContainerDied","Data":"dc80c75e631ddfe187b07f15ee9e80b1ac073885b8f92777fe678a5bd2e3cfc2"} Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.752803 4791 scope.go:117] "RemoveContainer" containerID="c89582836db97f7126a9e93e286edad8c17dd36f708c5681bd218f4e6a9239c9" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.754594 4791 generic.go:334] "Generic (PLEG): container finished" podID="da348cdf-f9c8-4e7c-b462-7d4979dada11" containerID="29419d6772a182a34015dcbab5838bc82843f52d5fd7f483eb3941c4d24999db" exitCode=0 Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.754647 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-w8nfn" event={"ID":"da348cdf-f9c8-4e7c-b462-7d4979dada11","Type":"ContainerDied","Data":"29419d6772a182a34015dcbab5838bc82843f52d5fd7f483eb3941c4d24999db"} Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.754663 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-w8nfn" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.754683 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-w8nfn" event={"ID":"da348cdf-f9c8-4e7c-b462-7d4979dada11","Type":"ContainerDied","Data":"f3da2a06175ac96686413ab0f9eee802183401fd3a1355a069b06aa464d2cc81"} Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.770626 4791 scope.go:117] "RemoveContainer" containerID="c89582836db97f7126a9e93e286edad8c17dd36f708c5681bd218f4e6a9239c9" Feb 18 00:39:59 crc kubenswrapper[4791]: E0218 00:39:59.770989 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c89582836db97f7126a9e93e286edad8c17dd36f708c5681bd218f4e6a9239c9\": container with ID starting with c89582836db97f7126a9e93e286edad8c17dd36f708c5681bd218f4e6a9239c9 not found: ID does not exist" containerID="c89582836db97f7126a9e93e286edad8c17dd36f708c5681bd218f4e6a9239c9" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.771023 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c89582836db97f7126a9e93e286edad8c17dd36f708c5681bd218f4e6a9239c9"} err="failed to get container status \"c89582836db97f7126a9e93e286edad8c17dd36f708c5681bd218f4e6a9239c9\": rpc error: code = NotFound desc = could not find container \"c89582836db97f7126a9e93e286edad8c17dd36f708c5681bd218f4e6a9239c9\": container with ID starting with c89582836db97f7126a9e93e286edad8c17dd36f708c5681bd218f4e6a9239c9 not found: ID does not exist" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.771051 4791 scope.go:117] "RemoveContainer" containerID="29419d6772a182a34015dcbab5838bc82843f52d5fd7f483eb3941c4d24999db" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.797406 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-2m72p"] Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.801890 4791 scope.go:117] "RemoveContainer" containerID="29419d6772a182a34015dcbab5838bc82843f52d5fd7f483eb3941c4d24999db" Feb 18 00:39:59 crc kubenswrapper[4791]: E0218 00:39:59.802408 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"29419d6772a182a34015dcbab5838bc82843f52d5fd7f483eb3941c4d24999db\": container with ID starting with 29419d6772a182a34015dcbab5838bc82843f52d5fd7f483eb3941c4d24999db not found: ID does not exist" containerID="29419d6772a182a34015dcbab5838bc82843f52d5fd7f483eb3941c4d24999db" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.802451 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"29419d6772a182a34015dcbab5838bc82843f52d5fd7f483eb3941c4d24999db"} err="failed to get container status \"29419d6772a182a34015dcbab5838bc82843f52d5fd7f483eb3941c4d24999db\": rpc error: code = NotFound desc = could not find container \"29419d6772a182a34015dcbab5838bc82843f52d5fd7f483eb3941c4d24999db\": container with ID starting with 29419d6772a182a34015dcbab5838bc82843f52d5fd7f483eb3941c4d24999db not found: ID does not exist" Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.811205 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-2m72p"] Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.815278 
4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-w8nfn"] Feb 18 00:39:59 crc kubenswrapper[4791]: I0218 00:39:59.818433 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-w8nfn"] Feb 18 00:40:00 crc kubenswrapper[4791]: I0218 00:40:00.129775 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5b998fd5d9-vg6m6"] Feb 18 00:40:00 crc kubenswrapper[4791]: I0218 00:40:00.299990 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-94b4d8c88-ls6l4"] Feb 18 00:40:00 crc kubenswrapper[4791]: I0218 00:40:00.300883 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-94b4d8c88-ls6l4" Feb 18 00:40:00 crc kubenswrapper[4791]: I0218 00:40:00.305323 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Feb 18 00:40:00 crc kubenswrapper[4791]: I0218 00:40:00.305678 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Feb 18 00:40:00 crc kubenswrapper[4791]: I0218 00:40:00.306764 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Feb 18 00:40:00 crc kubenswrapper[4791]: I0218 00:40:00.308054 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Feb 18 00:40:00 crc kubenswrapper[4791]: I0218 00:40:00.308126 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Feb 18 00:40:00 crc kubenswrapper[4791]: I0218 00:40:00.308374 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Feb 18 00:40:00 crc kubenswrapper[4791]: I0218 00:40:00.310784 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-94b4d8c88-ls6l4"] Feb 18 00:40:00 crc kubenswrapper[4791]: I0218 00:40:00.492229 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/731ba417-d05f-48e6-bd6b-9dc95433636f-client-ca\") pod \"route-controller-manager-94b4d8c88-ls6l4\" (UID: \"731ba417-d05f-48e6-bd6b-9dc95433636f\") " pod="openshift-route-controller-manager/route-controller-manager-94b4d8c88-ls6l4" Feb 18 00:40:00 crc kubenswrapper[4791]: I0218 00:40:00.492361 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/731ba417-d05f-48e6-bd6b-9dc95433636f-serving-cert\") pod \"route-controller-manager-94b4d8c88-ls6l4\" (UID: \"731ba417-d05f-48e6-bd6b-9dc95433636f\") " pod="openshift-route-controller-manager/route-controller-manager-94b4d8c88-ls6l4" Feb 18 00:40:00 crc kubenswrapper[4791]: I0218 00:40:00.492438 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/731ba417-d05f-48e6-bd6b-9dc95433636f-config\") pod \"route-controller-manager-94b4d8c88-ls6l4\" (UID: \"731ba417-d05f-48e6-bd6b-9dc95433636f\") " 
pod="openshift-route-controller-manager/route-controller-manager-94b4d8c88-ls6l4" Feb 18 00:40:00 crc kubenswrapper[4791]: I0218 00:40:00.492489 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vdfb\" (UniqueName: \"kubernetes.io/projected/731ba417-d05f-48e6-bd6b-9dc95433636f-kube-api-access-4vdfb\") pod \"route-controller-manager-94b4d8c88-ls6l4\" (UID: \"731ba417-d05f-48e6-bd6b-9dc95433636f\") " pod="openshift-route-controller-manager/route-controller-manager-94b4d8c88-ls6l4" Feb 18 00:40:00 crc kubenswrapper[4791]: I0218 00:40:00.593269 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/731ba417-d05f-48e6-bd6b-9dc95433636f-config\") pod \"route-controller-manager-94b4d8c88-ls6l4\" (UID: \"731ba417-d05f-48e6-bd6b-9dc95433636f\") " pod="openshift-route-controller-manager/route-controller-manager-94b4d8c88-ls6l4" Feb 18 00:40:00 crc kubenswrapper[4791]: I0218 00:40:00.593331 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vdfb\" (UniqueName: \"kubernetes.io/projected/731ba417-d05f-48e6-bd6b-9dc95433636f-kube-api-access-4vdfb\") pod \"route-controller-manager-94b4d8c88-ls6l4\" (UID: \"731ba417-d05f-48e6-bd6b-9dc95433636f\") " pod="openshift-route-controller-manager/route-controller-manager-94b4d8c88-ls6l4" Feb 18 00:40:00 crc kubenswrapper[4791]: I0218 00:40:00.593393 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/731ba417-d05f-48e6-bd6b-9dc95433636f-client-ca\") pod \"route-controller-manager-94b4d8c88-ls6l4\" (UID: \"731ba417-d05f-48e6-bd6b-9dc95433636f\") " pod="openshift-route-controller-manager/route-controller-manager-94b4d8c88-ls6l4" Feb 18 00:40:00 crc kubenswrapper[4791]: I0218 00:40:00.593436 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/731ba417-d05f-48e6-bd6b-9dc95433636f-serving-cert\") pod \"route-controller-manager-94b4d8c88-ls6l4\" (UID: \"731ba417-d05f-48e6-bd6b-9dc95433636f\") " pod="openshift-route-controller-manager/route-controller-manager-94b4d8c88-ls6l4" Feb 18 00:40:00 crc kubenswrapper[4791]: I0218 00:40:00.594350 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/731ba417-d05f-48e6-bd6b-9dc95433636f-client-ca\") pod \"route-controller-manager-94b4d8c88-ls6l4\" (UID: \"731ba417-d05f-48e6-bd6b-9dc95433636f\") " pod="openshift-route-controller-manager/route-controller-manager-94b4d8c88-ls6l4" Feb 18 00:40:00 crc kubenswrapper[4791]: I0218 00:40:00.594423 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/731ba417-d05f-48e6-bd6b-9dc95433636f-config\") pod \"route-controller-manager-94b4d8c88-ls6l4\" (UID: \"731ba417-d05f-48e6-bd6b-9dc95433636f\") " pod="openshift-route-controller-manager/route-controller-manager-94b4d8c88-ls6l4" Feb 18 00:40:00 crc kubenswrapper[4791]: I0218 00:40:00.598821 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/731ba417-d05f-48e6-bd6b-9dc95433636f-serving-cert\") pod \"route-controller-manager-94b4d8c88-ls6l4\" (UID: \"731ba417-d05f-48e6-bd6b-9dc95433636f\") " pod="openshift-route-controller-manager/route-controller-manager-94b4d8c88-ls6l4" Feb 18 
00:40:00 crc kubenswrapper[4791]: I0218 00:40:00.619654 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4vdfb\" (UniqueName: \"kubernetes.io/projected/731ba417-d05f-48e6-bd6b-9dc95433636f-kube-api-access-4vdfb\") pod \"route-controller-manager-94b4d8c88-ls6l4\" (UID: \"731ba417-d05f-48e6-bd6b-9dc95433636f\") " pod="openshift-route-controller-manager/route-controller-manager-94b4d8c88-ls6l4" Feb 18 00:40:00 crc kubenswrapper[4791]: I0218 00:40:00.624930 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-94b4d8c88-ls6l4" Feb 18 00:40:00 crc kubenswrapper[4791]: I0218 00:40:00.762375 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5b998fd5d9-vg6m6" event={"ID":"dd8745b3-510d-40df-8c5c-449d5b7b4a99","Type":"ContainerStarted","Data":"0c71da37adec711b48256836bd8c807186b5d8b92d3dde9f728f132ec1480b27"} Feb 18 00:40:00 crc kubenswrapper[4791]: I0218 00:40:00.762412 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5b998fd5d9-vg6m6" event={"ID":"dd8745b3-510d-40df-8c5c-449d5b7b4a99","Type":"ContainerStarted","Data":"bf8ebc98f82f46eafa3e28b7fdbeff9d5c64a26c07f9d2afd10ce3b66ad59bd0"} Feb 18 00:40:00 crc kubenswrapper[4791]: I0218 00:40:00.764414 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-5b998fd5d9-vg6m6" Feb 18 00:40:00 crc kubenswrapper[4791]: I0218 00:40:00.773258 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-5b998fd5d9-vg6m6" Feb 18 00:40:00 crc kubenswrapper[4791]: I0218 00:40:00.781787 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-5b998fd5d9-vg6m6" podStartSLOduration=1.781724155 podStartE2EDuration="1.781724155s" podCreationTimestamp="2026-02-18 00:39:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:40:00.780560107 +0000 UTC m=+342.348573277" watchObservedRunningTime="2026-02-18 00:40:00.781724155 +0000 UTC m=+342.349737345" Feb 18 00:40:00 crc kubenswrapper[4791]: I0218 00:40:00.842087 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-94b4d8c88-ls6l4"] Feb 18 00:40:00 crc kubenswrapper[4791]: W0218 00:40:00.852731 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod731ba417_d05f_48e6_bd6b_9dc95433636f.slice/crio-4714f78a8b39134ad1a85677d9b67274cd940708b4d9b976efa2b4565f49a76a WatchSource:0}: Error finding container 4714f78a8b39134ad1a85677d9b67274cd940708b4d9b976efa2b4565f49a76a: Status 404 returned error can't find the container with id 4714f78a8b39134ad1a85677d9b67274cd940708b4d9b976efa2b4565f49a76a Feb 18 00:40:01 crc kubenswrapper[4791]: I0218 00:40:01.067746 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c1261ad9-b566-419e-ad9a-e7b361edd24a" path="/var/lib/kubelet/pods/c1261ad9-b566-419e-ad9a-e7b361edd24a/volumes" Feb 18 00:40:01 crc kubenswrapper[4791]: I0218 00:40:01.068660 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da348cdf-f9c8-4e7c-b462-7d4979dada11" 
path="/var/lib/kubelet/pods/da348cdf-f9c8-4e7c-b462-7d4979dada11/volumes" Feb 18 00:40:01 crc kubenswrapper[4791]: I0218 00:40:01.783879 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-94b4d8c88-ls6l4" event={"ID":"731ba417-d05f-48e6-bd6b-9dc95433636f","Type":"ContainerStarted","Data":"9bdc4819cc97c172cec83d38ff844e7d865fcf46f71d2508fb823b9c774292f8"} Feb 18 00:40:01 crc kubenswrapper[4791]: I0218 00:40:01.783947 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-94b4d8c88-ls6l4" event={"ID":"731ba417-d05f-48e6-bd6b-9dc95433636f","Type":"ContainerStarted","Data":"4714f78a8b39134ad1a85677d9b67274cd940708b4d9b976efa2b4565f49a76a"} Feb 18 00:40:01 crc kubenswrapper[4791]: I0218 00:40:01.784222 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-94b4d8c88-ls6l4" Feb 18 00:40:01 crc kubenswrapper[4791]: I0218 00:40:01.789823 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-94b4d8c88-ls6l4" Feb 18 00:40:01 crc kubenswrapper[4791]: I0218 00:40:01.802563 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-94b4d8c88-ls6l4" podStartSLOduration=3.802544379 podStartE2EDuration="3.802544379s" podCreationTimestamp="2026-02-18 00:39:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:40:01.801956101 +0000 UTC m=+343.369969281" watchObservedRunningTime="2026-02-18 00:40:01.802544379 +0000 UTC m=+343.370557549" Feb 18 00:40:19 crc kubenswrapper[4791]: I0218 00:40:19.768084 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-94b4d8c88-ls6l4"] Feb 18 00:40:19 crc kubenswrapper[4791]: I0218 00:40:19.770574 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-94b4d8c88-ls6l4" podUID="731ba417-d05f-48e6-bd6b-9dc95433636f" containerName="route-controller-manager" containerID="cri-o://9bdc4819cc97c172cec83d38ff844e7d865fcf46f71d2508fb823b9c774292f8" gracePeriod=30 Feb 18 00:40:20 crc kubenswrapper[4791]: I0218 00:40:20.197536 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-94b4d8c88-ls6l4" Feb 18 00:40:20 crc kubenswrapper[4791]: I0218 00:40:20.248903 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/731ba417-d05f-48e6-bd6b-9dc95433636f-config\") pod \"731ba417-d05f-48e6-bd6b-9dc95433636f\" (UID: \"731ba417-d05f-48e6-bd6b-9dc95433636f\") " Feb 18 00:40:20 crc kubenswrapper[4791]: I0218 00:40:20.248964 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4vdfb\" (UniqueName: \"kubernetes.io/projected/731ba417-d05f-48e6-bd6b-9dc95433636f-kube-api-access-4vdfb\") pod \"731ba417-d05f-48e6-bd6b-9dc95433636f\" (UID: \"731ba417-d05f-48e6-bd6b-9dc95433636f\") " Feb 18 00:40:20 crc kubenswrapper[4791]: I0218 00:40:20.249024 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/731ba417-d05f-48e6-bd6b-9dc95433636f-serving-cert\") pod \"731ba417-d05f-48e6-bd6b-9dc95433636f\" (UID: \"731ba417-d05f-48e6-bd6b-9dc95433636f\") " Feb 18 00:40:20 crc kubenswrapper[4791]: I0218 00:40:20.249055 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/731ba417-d05f-48e6-bd6b-9dc95433636f-client-ca\") pod \"731ba417-d05f-48e6-bd6b-9dc95433636f\" (UID: \"731ba417-d05f-48e6-bd6b-9dc95433636f\") " Feb 18 00:40:20 crc kubenswrapper[4791]: I0218 00:40:20.249863 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/731ba417-d05f-48e6-bd6b-9dc95433636f-client-ca" (OuterVolumeSpecName: "client-ca") pod "731ba417-d05f-48e6-bd6b-9dc95433636f" (UID: "731ba417-d05f-48e6-bd6b-9dc95433636f"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:40:20 crc kubenswrapper[4791]: I0218 00:40:20.250421 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/731ba417-d05f-48e6-bd6b-9dc95433636f-config" (OuterVolumeSpecName: "config") pod "731ba417-d05f-48e6-bd6b-9dc95433636f" (UID: "731ba417-d05f-48e6-bd6b-9dc95433636f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:40:20 crc kubenswrapper[4791]: I0218 00:40:20.253987 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/731ba417-d05f-48e6-bd6b-9dc95433636f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "731ba417-d05f-48e6-bd6b-9dc95433636f" (UID: "731ba417-d05f-48e6-bd6b-9dc95433636f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:40:20 crc kubenswrapper[4791]: I0218 00:40:20.256709 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/731ba417-d05f-48e6-bd6b-9dc95433636f-kube-api-access-4vdfb" (OuterVolumeSpecName: "kube-api-access-4vdfb") pod "731ba417-d05f-48e6-bd6b-9dc95433636f" (UID: "731ba417-d05f-48e6-bd6b-9dc95433636f"). InnerVolumeSpecName "kube-api-access-4vdfb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:40:20 crc kubenswrapper[4791]: I0218 00:40:20.350871 4791 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/731ba417-d05f-48e6-bd6b-9dc95433636f-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:40:20 crc kubenswrapper[4791]: I0218 00:40:20.350907 4791 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/731ba417-d05f-48e6-bd6b-9dc95433636f-client-ca\") on node \"crc\" DevicePath \"\"" Feb 18 00:40:20 crc kubenswrapper[4791]: I0218 00:40:20.350916 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/731ba417-d05f-48e6-bd6b-9dc95433636f-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:40:20 crc kubenswrapper[4791]: I0218 00:40:20.350947 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4vdfb\" (UniqueName: \"kubernetes.io/projected/731ba417-d05f-48e6-bd6b-9dc95433636f-kube-api-access-4vdfb\") on node \"crc\" DevicePath \"\"" Feb 18 00:40:20 crc kubenswrapper[4791]: I0218 00:40:20.887812 4791 generic.go:334] "Generic (PLEG): container finished" podID="731ba417-d05f-48e6-bd6b-9dc95433636f" containerID="9bdc4819cc97c172cec83d38ff844e7d865fcf46f71d2508fb823b9c774292f8" exitCode=0 Feb 18 00:40:20 crc kubenswrapper[4791]: I0218 00:40:20.887881 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-94b4d8c88-ls6l4" event={"ID":"731ba417-d05f-48e6-bd6b-9dc95433636f","Type":"ContainerDied","Data":"9bdc4819cc97c172cec83d38ff844e7d865fcf46f71d2508fb823b9c774292f8"} Feb 18 00:40:20 crc kubenswrapper[4791]: I0218 00:40:20.887904 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-94b4d8c88-ls6l4" Feb 18 00:40:20 crc kubenswrapper[4791]: I0218 00:40:20.887940 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-94b4d8c88-ls6l4" event={"ID":"731ba417-d05f-48e6-bd6b-9dc95433636f","Type":"ContainerDied","Data":"4714f78a8b39134ad1a85677d9b67274cd940708b4d9b976efa2b4565f49a76a"} Feb 18 00:40:20 crc kubenswrapper[4791]: I0218 00:40:20.887987 4791 scope.go:117] "RemoveContainer" containerID="9bdc4819cc97c172cec83d38ff844e7d865fcf46f71d2508fb823b9c774292f8" Feb 18 00:40:20 crc kubenswrapper[4791]: I0218 00:40:20.907001 4791 scope.go:117] "RemoveContainer" containerID="9bdc4819cc97c172cec83d38ff844e7d865fcf46f71d2508fb823b9c774292f8" Feb 18 00:40:20 crc kubenswrapper[4791]: E0218 00:40:20.907439 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9bdc4819cc97c172cec83d38ff844e7d865fcf46f71d2508fb823b9c774292f8\": container with ID starting with 9bdc4819cc97c172cec83d38ff844e7d865fcf46f71d2508fb823b9c774292f8 not found: ID does not exist" containerID="9bdc4819cc97c172cec83d38ff844e7d865fcf46f71d2508fb823b9c774292f8" Feb 18 00:40:20 crc kubenswrapper[4791]: I0218 00:40:20.907478 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9bdc4819cc97c172cec83d38ff844e7d865fcf46f71d2508fb823b9c774292f8"} err="failed to get container status \"9bdc4819cc97c172cec83d38ff844e7d865fcf46f71d2508fb823b9c774292f8\": rpc error: code = NotFound desc = could not find container \"9bdc4819cc97c172cec83d38ff844e7d865fcf46f71d2508fb823b9c774292f8\": container with ID starting with 9bdc4819cc97c172cec83d38ff844e7d865fcf46f71d2508fb823b9c774292f8 not found: ID does not exist" Feb 18 00:40:20 crc kubenswrapper[4791]: I0218 00:40:20.928844 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-94b4d8c88-ls6l4"] Feb 18 00:40:20 crc kubenswrapper[4791]: I0218 00:40:20.936311 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-94b4d8c88-ls6l4"] Feb 18 00:40:21 crc kubenswrapper[4791]: I0218 00:40:21.067712 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="731ba417-d05f-48e6-bd6b-9dc95433636f" path="/var/lib/kubelet/pods/731ba417-d05f-48e6-bd6b-9dc95433636f/volumes" Feb 18 00:40:21 crc kubenswrapper[4791]: I0218 00:40:21.314237 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6f9cc86d95-cgnd8"] Feb 18 00:40:21 crc kubenswrapper[4791]: E0218 00:40:21.314435 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="731ba417-d05f-48e6-bd6b-9dc95433636f" containerName="route-controller-manager" Feb 18 00:40:21 crc kubenswrapper[4791]: I0218 00:40:21.314448 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="731ba417-d05f-48e6-bd6b-9dc95433636f" containerName="route-controller-manager" Feb 18 00:40:21 crc kubenswrapper[4791]: I0218 00:40:21.314554 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="731ba417-d05f-48e6-bd6b-9dc95433636f" containerName="route-controller-manager" Feb 18 00:40:21 crc kubenswrapper[4791]: I0218 00:40:21.314867 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f9cc86d95-cgnd8" Feb 18 00:40:21 crc kubenswrapper[4791]: I0218 00:40:21.316992 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Feb 18 00:40:21 crc kubenswrapper[4791]: I0218 00:40:21.317011 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Feb 18 00:40:21 crc kubenswrapper[4791]: I0218 00:40:21.317502 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Feb 18 00:40:21 crc kubenswrapper[4791]: I0218 00:40:21.317585 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Feb 18 00:40:21 crc kubenswrapper[4791]: I0218 00:40:21.317672 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Feb 18 00:40:21 crc kubenswrapper[4791]: I0218 00:40:21.324219 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6f9cc86d95-cgnd8"] Feb 18 00:40:21 crc kubenswrapper[4791]: I0218 00:40:21.327386 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Feb 18 00:40:21 crc kubenswrapper[4791]: I0218 00:40:21.363075 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t7b66\" (UniqueName: \"kubernetes.io/projected/fa7ef194-9f18-47d7-808d-dccf8c7e75e3-kube-api-access-t7b66\") pod \"route-controller-manager-6f9cc86d95-cgnd8\" (UID: \"fa7ef194-9f18-47d7-808d-dccf8c7e75e3\") " pod="openshift-route-controller-manager/route-controller-manager-6f9cc86d95-cgnd8" Feb 18 00:40:21 crc kubenswrapper[4791]: I0218 00:40:21.363153 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fa7ef194-9f18-47d7-808d-dccf8c7e75e3-client-ca\") pod \"route-controller-manager-6f9cc86d95-cgnd8\" (UID: \"fa7ef194-9f18-47d7-808d-dccf8c7e75e3\") " pod="openshift-route-controller-manager/route-controller-manager-6f9cc86d95-cgnd8" Feb 18 00:40:21 crc kubenswrapper[4791]: I0218 00:40:21.363228 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa7ef194-9f18-47d7-808d-dccf8c7e75e3-config\") pod \"route-controller-manager-6f9cc86d95-cgnd8\" (UID: \"fa7ef194-9f18-47d7-808d-dccf8c7e75e3\") " pod="openshift-route-controller-manager/route-controller-manager-6f9cc86d95-cgnd8" Feb 18 00:40:21 crc kubenswrapper[4791]: I0218 00:40:21.363252 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fa7ef194-9f18-47d7-808d-dccf8c7e75e3-serving-cert\") pod \"route-controller-manager-6f9cc86d95-cgnd8\" (UID: \"fa7ef194-9f18-47d7-808d-dccf8c7e75e3\") " pod="openshift-route-controller-manager/route-controller-manager-6f9cc86d95-cgnd8" Feb 18 00:40:21 crc kubenswrapper[4791]: I0218 00:40:21.464178 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t7b66\" (UniqueName: \"kubernetes.io/projected/fa7ef194-9f18-47d7-808d-dccf8c7e75e3-kube-api-access-t7b66\") pod 
\"route-controller-manager-6f9cc86d95-cgnd8\" (UID: \"fa7ef194-9f18-47d7-808d-dccf8c7e75e3\") " pod="openshift-route-controller-manager/route-controller-manager-6f9cc86d95-cgnd8" Feb 18 00:40:21 crc kubenswrapper[4791]: I0218 00:40:21.464279 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fa7ef194-9f18-47d7-808d-dccf8c7e75e3-client-ca\") pod \"route-controller-manager-6f9cc86d95-cgnd8\" (UID: \"fa7ef194-9f18-47d7-808d-dccf8c7e75e3\") " pod="openshift-route-controller-manager/route-controller-manager-6f9cc86d95-cgnd8" Feb 18 00:40:21 crc kubenswrapper[4791]: I0218 00:40:21.464319 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa7ef194-9f18-47d7-808d-dccf8c7e75e3-config\") pod \"route-controller-manager-6f9cc86d95-cgnd8\" (UID: \"fa7ef194-9f18-47d7-808d-dccf8c7e75e3\") " pod="openshift-route-controller-manager/route-controller-manager-6f9cc86d95-cgnd8" Feb 18 00:40:21 crc kubenswrapper[4791]: I0218 00:40:21.464355 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fa7ef194-9f18-47d7-808d-dccf8c7e75e3-serving-cert\") pod \"route-controller-manager-6f9cc86d95-cgnd8\" (UID: \"fa7ef194-9f18-47d7-808d-dccf8c7e75e3\") " pod="openshift-route-controller-manager/route-controller-manager-6f9cc86d95-cgnd8" Feb 18 00:40:21 crc kubenswrapper[4791]: I0218 00:40:21.465529 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fa7ef194-9f18-47d7-808d-dccf8c7e75e3-client-ca\") pod \"route-controller-manager-6f9cc86d95-cgnd8\" (UID: \"fa7ef194-9f18-47d7-808d-dccf8c7e75e3\") " pod="openshift-route-controller-manager/route-controller-manager-6f9cc86d95-cgnd8" Feb 18 00:40:21 crc kubenswrapper[4791]: I0218 00:40:21.466912 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa7ef194-9f18-47d7-808d-dccf8c7e75e3-config\") pod \"route-controller-manager-6f9cc86d95-cgnd8\" (UID: \"fa7ef194-9f18-47d7-808d-dccf8c7e75e3\") " pod="openshift-route-controller-manager/route-controller-manager-6f9cc86d95-cgnd8" Feb 18 00:40:21 crc kubenswrapper[4791]: I0218 00:40:21.468013 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fa7ef194-9f18-47d7-808d-dccf8c7e75e3-serving-cert\") pod \"route-controller-manager-6f9cc86d95-cgnd8\" (UID: \"fa7ef194-9f18-47d7-808d-dccf8c7e75e3\") " pod="openshift-route-controller-manager/route-controller-manager-6f9cc86d95-cgnd8" Feb 18 00:40:21 crc kubenswrapper[4791]: I0218 00:40:21.487926 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t7b66\" (UniqueName: \"kubernetes.io/projected/fa7ef194-9f18-47d7-808d-dccf8c7e75e3-kube-api-access-t7b66\") pod \"route-controller-manager-6f9cc86d95-cgnd8\" (UID: \"fa7ef194-9f18-47d7-808d-dccf8c7e75e3\") " pod="openshift-route-controller-manager/route-controller-manager-6f9cc86d95-cgnd8" Feb 18 00:40:21 crc kubenswrapper[4791]: I0218 00:40:21.640811 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f9cc86d95-cgnd8" Feb 18 00:40:22 crc kubenswrapper[4791]: I0218 00:40:22.125660 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6f9cc86d95-cgnd8"] Feb 18 00:40:22 crc kubenswrapper[4791]: W0218 00:40:22.132070 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfa7ef194_9f18_47d7_808d_dccf8c7e75e3.slice/crio-bea53c55779607fce227254f543f14aca569e23bd921678f3163f4addf999624 WatchSource:0}: Error finding container bea53c55779607fce227254f543f14aca569e23bd921678f3163f4addf999624: Status 404 returned error can't find the container with id bea53c55779607fce227254f543f14aca569e23bd921678f3163f4addf999624 Feb 18 00:40:22 crc kubenswrapper[4791]: I0218 00:40:22.905087 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6f9cc86d95-cgnd8" event={"ID":"fa7ef194-9f18-47d7-808d-dccf8c7e75e3","Type":"ContainerStarted","Data":"3508ad2cb1cdc125001e601d2baca9cad7db318fb98caddbd1dfe3784e2dd9aa"} Feb 18 00:40:22 crc kubenswrapper[4791]: I0218 00:40:22.905502 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6f9cc86d95-cgnd8" Feb 18 00:40:22 crc kubenswrapper[4791]: I0218 00:40:22.905518 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6f9cc86d95-cgnd8" event={"ID":"fa7ef194-9f18-47d7-808d-dccf8c7e75e3","Type":"ContainerStarted","Data":"bea53c55779607fce227254f543f14aca569e23bd921678f3163f4addf999624"} Feb 18 00:40:22 crc kubenswrapper[4791]: I0218 00:40:22.909530 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6f9cc86d95-cgnd8" Feb 18 00:40:22 crc kubenswrapper[4791]: I0218 00:40:22.927423 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6f9cc86d95-cgnd8" podStartSLOduration=3.927406471 podStartE2EDuration="3.927406471s" podCreationTimestamp="2026-02-18 00:40:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:40:22.922561758 +0000 UTC m=+364.490574938" watchObservedRunningTime="2026-02-18 00:40:22.927406471 +0000 UTC m=+364.495419641" Feb 18 00:40:24 crc kubenswrapper[4791]: I0218 00:40:24.601524 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-hsgxk"] Feb 18 00:40:24 crc kubenswrapper[4791]: I0218 00:40:24.602276 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-hsgxk" Feb 18 00:40:24 crc kubenswrapper[4791]: I0218 00:40:24.619146 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-hsgxk"] Feb 18 00:40:24 crc kubenswrapper[4791]: I0218 00:40:24.735909 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/164fb385-b552-436d-b235-90a13e193fc6-ca-trust-extracted\") pod \"image-registry-66df7c8f76-hsgxk\" (UID: \"164fb385-b552-436d-b235-90a13e193fc6\") " pod="openshift-image-registry/image-registry-66df7c8f76-hsgxk" Feb 18 00:40:24 crc kubenswrapper[4791]: I0218 00:40:24.735955 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/164fb385-b552-436d-b235-90a13e193fc6-trusted-ca\") pod \"image-registry-66df7c8f76-hsgxk\" (UID: \"164fb385-b552-436d-b235-90a13e193fc6\") " pod="openshift-image-registry/image-registry-66df7c8f76-hsgxk" Feb 18 00:40:24 crc kubenswrapper[4791]: I0218 00:40:24.735975 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/164fb385-b552-436d-b235-90a13e193fc6-installation-pull-secrets\") pod \"image-registry-66df7c8f76-hsgxk\" (UID: \"164fb385-b552-436d-b235-90a13e193fc6\") " pod="openshift-image-registry/image-registry-66df7c8f76-hsgxk" Feb 18 00:40:24 crc kubenswrapper[4791]: I0218 00:40:24.735995 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hjscl\" (UniqueName: \"kubernetes.io/projected/164fb385-b552-436d-b235-90a13e193fc6-kube-api-access-hjscl\") pod \"image-registry-66df7c8f76-hsgxk\" (UID: \"164fb385-b552-436d-b235-90a13e193fc6\") " pod="openshift-image-registry/image-registry-66df7c8f76-hsgxk" Feb 18 00:40:24 crc kubenswrapper[4791]: I0218 00:40:24.736016 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/164fb385-b552-436d-b235-90a13e193fc6-registry-tls\") pod \"image-registry-66df7c8f76-hsgxk\" (UID: \"164fb385-b552-436d-b235-90a13e193fc6\") " pod="openshift-image-registry/image-registry-66df7c8f76-hsgxk" Feb 18 00:40:24 crc kubenswrapper[4791]: I0218 00:40:24.736032 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/164fb385-b552-436d-b235-90a13e193fc6-registry-certificates\") pod \"image-registry-66df7c8f76-hsgxk\" (UID: \"164fb385-b552-436d-b235-90a13e193fc6\") " pod="openshift-image-registry/image-registry-66df7c8f76-hsgxk" Feb 18 00:40:24 crc kubenswrapper[4791]: I0218 00:40:24.736064 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/164fb385-b552-436d-b235-90a13e193fc6-bound-sa-token\") pod \"image-registry-66df7c8f76-hsgxk\" (UID: \"164fb385-b552-436d-b235-90a13e193fc6\") " pod="openshift-image-registry/image-registry-66df7c8f76-hsgxk" Feb 18 00:40:24 crc kubenswrapper[4791]: I0218 00:40:24.736150 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-hsgxk\" (UID: \"164fb385-b552-436d-b235-90a13e193fc6\") " pod="openshift-image-registry/image-registry-66df7c8f76-hsgxk" Feb 18 00:40:24 crc kubenswrapper[4791]: I0218 00:40:24.754848 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-hsgxk\" (UID: \"164fb385-b552-436d-b235-90a13e193fc6\") " pod="openshift-image-registry/image-registry-66df7c8f76-hsgxk" Feb 18 00:40:24 crc kubenswrapper[4791]: I0218 00:40:24.837782 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/164fb385-b552-436d-b235-90a13e193fc6-registry-certificates\") pod \"image-registry-66df7c8f76-hsgxk\" (UID: \"164fb385-b552-436d-b235-90a13e193fc6\") " pod="openshift-image-registry/image-registry-66df7c8f76-hsgxk" Feb 18 00:40:24 crc kubenswrapper[4791]: I0218 00:40:24.837838 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/164fb385-b552-436d-b235-90a13e193fc6-bound-sa-token\") pod \"image-registry-66df7c8f76-hsgxk\" (UID: \"164fb385-b552-436d-b235-90a13e193fc6\") " pod="openshift-image-registry/image-registry-66df7c8f76-hsgxk" Feb 18 00:40:24 crc kubenswrapper[4791]: I0218 00:40:24.837909 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/164fb385-b552-436d-b235-90a13e193fc6-ca-trust-extracted\") pod \"image-registry-66df7c8f76-hsgxk\" (UID: \"164fb385-b552-436d-b235-90a13e193fc6\") " pod="openshift-image-registry/image-registry-66df7c8f76-hsgxk" Feb 18 00:40:24 crc kubenswrapper[4791]: I0218 00:40:24.837936 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/164fb385-b552-436d-b235-90a13e193fc6-trusted-ca\") pod \"image-registry-66df7c8f76-hsgxk\" (UID: \"164fb385-b552-436d-b235-90a13e193fc6\") " pod="openshift-image-registry/image-registry-66df7c8f76-hsgxk" Feb 18 00:40:24 crc kubenswrapper[4791]: I0218 00:40:24.837959 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/164fb385-b552-436d-b235-90a13e193fc6-installation-pull-secrets\") pod \"image-registry-66df7c8f76-hsgxk\" (UID: \"164fb385-b552-436d-b235-90a13e193fc6\") " pod="openshift-image-registry/image-registry-66df7c8f76-hsgxk" Feb 18 00:40:24 crc kubenswrapper[4791]: I0218 00:40:24.837985 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hjscl\" (UniqueName: \"kubernetes.io/projected/164fb385-b552-436d-b235-90a13e193fc6-kube-api-access-hjscl\") pod \"image-registry-66df7c8f76-hsgxk\" (UID: \"164fb385-b552-436d-b235-90a13e193fc6\") " pod="openshift-image-registry/image-registry-66df7c8f76-hsgxk" Feb 18 00:40:24 crc kubenswrapper[4791]: I0218 00:40:24.838010 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/164fb385-b552-436d-b235-90a13e193fc6-registry-tls\") pod \"image-registry-66df7c8f76-hsgxk\" (UID: \"164fb385-b552-436d-b235-90a13e193fc6\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-hsgxk" Feb 18 00:40:24 crc kubenswrapper[4791]: I0218 00:40:24.838913 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/164fb385-b552-436d-b235-90a13e193fc6-trusted-ca\") pod \"image-registry-66df7c8f76-hsgxk\" (UID: \"164fb385-b552-436d-b235-90a13e193fc6\") " pod="openshift-image-registry/image-registry-66df7c8f76-hsgxk" Feb 18 00:40:24 crc kubenswrapper[4791]: I0218 00:40:24.839035 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/164fb385-b552-436d-b235-90a13e193fc6-registry-certificates\") pod \"image-registry-66df7c8f76-hsgxk\" (UID: \"164fb385-b552-436d-b235-90a13e193fc6\") " pod="openshift-image-registry/image-registry-66df7c8f76-hsgxk" Feb 18 00:40:24 crc kubenswrapper[4791]: I0218 00:40:24.839192 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/164fb385-b552-436d-b235-90a13e193fc6-ca-trust-extracted\") pod \"image-registry-66df7c8f76-hsgxk\" (UID: \"164fb385-b552-436d-b235-90a13e193fc6\") " pod="openshift-image-registry/image-registry-66df7c8f76-hsgxk" Feb 18 00:40:24 crc kubenswrapper[4791]: I0218 00:40:24.843946 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/164fb385-b552-436d-b235-90a13e193fc6-registry-tls\") pod \"image-registry-66df7c8f76-hsgxk\" (UID: \"164fb385-b552-436d-b235-90a13e193fc6\") " pod="openshift-image-registry/image-registry-66df7c8f76-hsgxk" Feb 18 00:40:24 crc kubenswrapper[4791]: I0218 00:40:24.844768 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/164fb385-b552-436d-b235-90a13e193fc6-installation-pull-secrets\") pod \"image-registry-66df7c8f76-hsgxk\" (UID: \"164fb385-b552-436d-b235-90a13e193fc6\") " pod="openshift-image-registry/image-registry-66df7c8f76-hsgxk" Feb 18 00:40:24 crc kubenswrapper[4791]: I0218 00:40:24.853200 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/164fb385-b552-436d-b235-90a13e193fc6-bound-sa-token\") pod \"image-registry-66df7c8f76-hsgxk\" (UID: \"164fb385-b552-436d-b235-90a13e193fc6\") " pod="openshift-image-registry/image-registry-66df7c8f76-hsgxk" Feb 18 00:40:24 crc kubenswrapper[4791]: I0218 00:40:24.857915 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjscl\" (UniqueName: \"kubernetes.io/projected/164fb385-b552-436d-b235-90a13e193fc6-kube-api-access-hjscl\") pod \"image-registry-66df7c8f76-hsgxk\" (UID: \"164fb385-b552-436d-b235-90a13e193fc6\") " pod="openshift-image-registry/image-registry-66df7c8f76-hsgxk" Feb 18 00:40:24 crc kubenswrapper[4791]: I0218 00:40:24.920129 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-hsgxk" Feb 18 00:40:25 crc kubenswrapper[4791]: I0218 00:40:25.327983 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-hsgxk"] Feb 18 00:40:25 crc kubenswrapper[4791]: W0218 00:40:25.337879 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod164fb385_b552_436d_b235_90a13e193fc6.slice/crio-7caa1c820c9419265c93bc95be3a5303f72e14163bdb224b040c361fafd00dfd WatchSource:0}: Error finding container 7caa1c820c9419265c93bc95be3a5303f72e14163bdb224b040c361fafd00dfd: Status 404 returned error can't find the container with id 7caa1c820c9419265c93bc95be3a5303f72e14163bdb224b040c361fafd00dfd Feb 18 00:40:25 crc kubenswrapper[4791]: I0218 00:40:25.920546 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-hsgxk" event={"ID":"164fb385-b552-436d-b235-90a13e193fc6","Type":"ContainerStarted","Data":"b1ae844763d8d00ee03f9999dc1e924a8a6ed988e9d662644dcbdc6621a29289"} Feb 18 00:40:25 crc kubenswrapper[4791]: I0218 00:40:25.920585 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-hsgxk" event={"ID":"164fb385-b552-436d-b235-90a13e193fc6","Type":"ContainerStarted","Data":"7caa1c820c9419265c93bc95be3a5303f72e14163bdb224b040c361fafd00dfd"} Feb 18 00:40:25 crc kubenswrapper[4791]: I0218 00:40:25.920701 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-hsgxk" Feb 18 00:40:25 crc kubenswrapper[4791]: I0218 00:40:25.939622 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-hsgxk" podStartSLOduration=1.939602676 podStartE2EDuration="1.939602676s" podCreationTimestamp="2026-02-18 00:40:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:40:25.936696165 +0000 UTC m=+367.504709345" watchObservedRunningTime="2026-02-18 00:40:25.939602676 +0000 UTC m=+367.507615846" Feb 18 00:40:26 crc kubenswrapper[4791]: I0218 00:40:26.800396 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 00:40:26 crc kubenswrapper[4791]: I0218 00:40:26.800683 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 00:40:44 crc kubenswrapper[4791]: I0218 00:40:44.929413 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-hsgxk" Feb 18 00:40:44 crc kubenswrapper[4791]: I0218 00:40:44.985909 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-r7vng"] Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.249083 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/certified-operators-cp28s"] Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.249335 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-cp28s" podUID="719d62e6-1ac9-497f-b889-d2ee84c621d1" containerName="registry-server" containerID="cri-o://a699afb9d72f8df1c7d3f49b4c78af8d27c3b1dd1edf6251fcf5d63cfd973990" gracePeriod=30 Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.262188 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mklvx"] Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.262523 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-mklvx" podUID="14300052-284f-4b0c-8238-13ea3a9ddb6a" containerName="registry-server" containerID="cri-o://24290760148bc7b3981db02d2eb98d5f2e79a9166153a8e4d8de21ec2421cdb9" gracePeriod=30 Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.267337 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-plj59"] Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.267728 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-plj59" podUID="d06d8fca-e20d-4b6b-9466-8c612ec8cc5f" containerName="marketplace-operator" containerID="cri-o://9c6aac33fb841e399d9a20d248df9763fb743255e256273f2ff40099d2ea1bb0" gracePeriod=30 Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.278883 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-l5zp8"] Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.279145 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-l5zp8" podUID="f5747934-5c30-4b86-bfd5-03a8ad37c9bd" containerName="registry-server" containerID="cri-o://56755343ab669ca830c1e822719b7597531411a842b5b2cb39d4ae4bda6374fd" gracePeriod=30 Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.293438 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2zpl9"] Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.294143 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-2zpl9" Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.296967 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mqrgh"] Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.297296 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-mqrgh" podUID="edb21ff9-bfba-4ff7-a6df-54d2236a1233" containerName="registry-server" containerID="cri-o://eb4ec9c11186c4bab8b39d6f58905a349e54d99a639b60e710e600b0fc4975b4" gracePeriod=30 Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.303707 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2zpl9"] Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.344148 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zdlkd\" (UniqueName: \"kubernetes.io/projected/2b61f101-7b72-4308-9c07-9d2e441f333c-kube-api-access-zdlkd\") pod \"marketplace-operator-79b997595-2zpl9\" (UID: \"2b61f101-7b72-4308-9c07-9d2e441f333c\") " pod="openshift-marketplace/marketplace-operator-79b997595-2zpl9" Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.344265 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/2b61f101-7b72-4308-9c07-9d2e441f333c-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-2zpl9\" (UID: \"2b61f101-7b72-4308-9c07-9d2e441f333c\") " pod="openshift-marketplace/marketplace-operator-79b997595-2zpl9" Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.344292 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2b61f101-7b72-4308-9c07-9d2e441f333c-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-2zpl9\" (UID: \"2b61f101-7b72-4308-9c07-9d2e441f333c\") " pod="openshift-marketplace/marketplace-operator-79b997595-2zpl9" Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.445363 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/2b61f101-7b72-4308-9c07-9d2e441f333c-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-2zpl9\" (UID: \"2b61f101-7b72-4308-9c07-9d2e441f333c\") " pod="openshift-marketplace/marketplace-operator-79b997595-2zpl9" Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.445403 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2b61f101-7b72-4308-9c07-9d2e441f333c-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-2zpl9\" (UID: \"2b61f101-7b72-4308-9c07-9d2e441f333c\") " pod="openshift-marketplace/marketplace-operator-79b997595-2zpl9" Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.445452 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zdlkd\" (UniqueName: \"kubernetes.io/projected/2b61f101-7b72-4308-9c07-9d2e441f333c-kube-api-access-zdlkd\") pod \"marketplace-operator-79b997595-2zpl9\" (UID: \"2b61f101-7b72-4308-9c07-9d2e441f333c\") " pod="openshift-marketplace/marketplace-operator-79b997595-2zpl9" Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.451333 4791 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2b61f101-7b72-4308-9c07-9d2e441f333c-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-2zpl9\" (UID: \"2b61f101-7b72-4308-9c07-9d2e441f333c\") " pod="openshift-marketplace/marketplace-operator-79b997595-2zpl9" Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.475473 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/2b61f101-7b72-4308-9c07-9d2e441f333c-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-2zpl9\" (UID: \"2b61f101-7b72-4308-9c07-9d2e441f333c\") " pod="openshift-marketplace/marketplace-operator-79b997595-2zpl9" Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.491535 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zdlkd\" (UniqueName: \"kubernetes.io/projected/2b61f101-7b72-4308-9c07-9d2e441f333c-kube-api-access-zdlkd\") pod \"marketplace-operator-79b997595-2zpl9\" (UID: \"2b61f101-7b72-4308-9c07-9d2e441f333c\") " pod="openshift-marketplace/marketplace-operator-79b997595-2zpl9" Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.614876 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-2zpl9" Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.752232 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cp28s" Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.779273 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mklvx" Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.810426 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-plj59" Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.833860 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-mqrgh" Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.854604 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/719d62e6-1ac9-497f-b889-d2ee84c621d1-catalog-content\") pod \"719d62e6-1ac9-497f-b889-d2ee84c621d1\" (UID: \"719d62e6-1ac9-497f-b889-d2ee84c621d1\") " Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.854854 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14300052-284f-4b0c-8238-13ea3a9ddb6a-utilities\") pod \"14300052-284f-4b0c-8238-13ea3a9ddb6a\" (UID: \"14300052-284f-4b0c-8238-13ea3a9ddb6a\") " Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.855006 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14300052-284f-4b0c-8238-13ea3a9ddb6a-catalog-content\") pod \"14300052-284f-4b0c-8238-13ea3a9ddb6a\" (UID: \"14300052-284f-4b0c-8238-13ea3a9ddb6a\") " Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.855144 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b4zch\" (UniqueName: \"kubernetes.io/projected/719d62e6-1ac9-497f-b889-d2ee84c621d1-kube-api-access-b4zch\") pod \"719d62e6-1ac9-497f-b889-d2ee84c621d1\" (UID: \"719d62e6-1ac9-497f-b889-d2ee84c621d1\") " Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.855202 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/719d62e6-1ac9-497f-b889-d2ee84c621d1-utilities\") pod \"719d62e6-1ac9-497f-b889-d2ee84c621d1\" (UID: \"719d62e6-1ac9-497f-b889-d2ee84c621d1\") " Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.855234 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fbmr8\" (UniqueName: \"kubernetes.io/projected/14300052-284f-4b0c-8238-13ea3a9ddb6a-kube-api-access-fbmr8\") pod \"14300052-284f-4b0c-8238-13ea3a9ddb6a\" (UID: \"14300052-284f-4b0c-8238-13ea3a9ddb6a\") " Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.859107 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/14300052-284f-4b0c-8238-13ea3a9ddb6a-utilities" (OuterVolumeSpecName: "utilities") pod "14300052-284f-4b0c-8238-13ea3a9ddb6a" (UID: "14300052-284f-4b0c-8238-13ea3a9ddb6a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.860311 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/719d62e6-1ac9-497f-b889-d2ee84c621d1-utilities" (OuterVolumeSpecName: "utilities") pod "719d62e6-1ac9-497f-b889-d2ee84c621d1" (UID: "719d62e6-1ac9-497f-b889-d2ee84c621d1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.863618 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l5zp8" Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.864339 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/719d62e6-1ac9-497f-b889-d2ee84c621d1-kube-api-access-b4zch" (OuterVolumeSpecName: "kube-api-access-b4zch") pod "719d62e6-1ac9-497f-b889-d2ee84c621d1" (UID: "719d62e6-1ac9-497f-b889-d2ee84c621d1"). InnerVolumeSpecName "kube-api-access-b4zch". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.864413 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/14300052-284f-4b0c-8238-13ea3a9ddb6a-kube-api-access-fbmr8" (OuterVolumeSpecName: "kube-api-access-fbmr8") pod "14300052-284f-4b0c-8238-13ea3a9ddb6a" (UID: "14300052-284f-4b0c-8238-13ea3a9ddb6a"). InnerVolumeSpecName "kube-api-access-fbmr8". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.936067 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/719d62e6-1ac9-497f-b889-d2ee84c621d1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "719d62e6-1ac9-497f-b889-d2ee84c621d1" (UID: "719d62e6-1ac9-497f-b889-d2ee84c621d1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.937880 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/14300052-284f-4b0c-8238-13ea3a9ddb6a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "14300052-284f-4b0c-8238-13ea3a9ddb6a" (UID: "14300052-284f-4b0c-8238-13ea3a9ddb6a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.956235 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p4krc\" (UniqueName: \"kubernetes.io/projected/edb21ff9-bfba-4ff7-a6df-54d2236a1233-kube-api-access-p4krc\") pod \"edb21ff9-bfba-4ff7-a6df-54d2236a1233\" (UID: \"edb21ff9-bfba-4ff7-a6df-54d2236a1233\") " Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.956343 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/edb21ff9-bfba-4ff7-a6df-54d2236a1233-catalog-content\") pod \"edb21ff9-bfba-4ff7-a6df-54d2236a1233\" (UID: \"edb21ff9-bfba-4ff7-a6df-54d2236a1233\") " Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.956388 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5747934-5c30-4b86-bfd5-03a8ad37c9bd-catalog-content\") pod \"f5747934-5c30-4b86-bfd5-03a8ad37c9bd\" (UID: \"f5747934-5c30-4b86-bfd5-03a8ad37c9bd\") " Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.956425 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cxk9n\" (UniqueName: \"kubernetes.io/projected/d06d8fca-e20d-4b6b-9466-8c612ec8cc5f-kube-api-access-cxk9n\") pod \"d06d8fca-e20d-4b6b-9466-8c612ec8cc5f\" (UID: \"d06d8fca-e20d-4b6b-9466-8c612ec8cc5f\") " Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.956490 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/d06d8fca-e20d-4b6b-9466-8c612ec8cc5f-marketplace-operator-metrics\") pod \"d06d8fca-e20d-4b6b-9466-8c612ec8cc5f\" (UID: \"d06d8fca-e20d-4b6b-9466-8c612ec8cc5f\") " Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.956545 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pm7wc\" (UniqueName: \"kubernetes.io/projected/f5747934-5c30-4b86-bfd5-03a8ad37c9bd-kube-api-access-pm7wc\") pod \"f5747934-5c30-4b86-bfd5-03a8ad37c9bd\" (UID: \"f5747934-5c30-4b86-bfd5-03a8ad37c9bd\") " Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.956578 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d06d8fca-e20d-4b6b-9466-8c612ec8cc5f-marketplace-trusted-ca\") pod \"d06d8fca-e20d-4b6b-9466-8c612ec8cc5f\" (UID: \"d06d8fca-e20d-4b6b-9466-8c612ec8cc5f\") " Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.956622 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5747934-5c30-4b86-bfd5-03a8ad37c9bd-utilities\") pod \"f5747934-5c30-4b86-bfd5-03a8ad37c9bd\" (UID: \"f5747934-5c30-4b86-bfd5-03a8ad37c9bd\") " Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.956640 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/edb21ff9-bfba-4ff7-a6df-54d2236a1233-utilities\") pod \"edb21ff9-bfba-4ff7-a6df-54d2236a1233\" (UID: \"edb21ff9-bfba-4ff7-a6df-54d2236a1233\") " Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.956941 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14300052-284f-4b0c-8238-13ea3a9ddb6a-catalog-content\") on node 
\"crc\" DevicePath \"\"" Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.956960 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b4zch\" (UniqueName: \"kubernetes.io/projected/719d62e6-1ac9-497f-b889-d2ee84c621d1-kube-api-access-b4zch\") on node \"crc\" DevicePath \"\"" Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.956972 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/719d62e6-1ac9-497f-b889-d2ee84c621d1-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.956983 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fbmr8\" (UniqueName: \"kubernetes.io/projected/14300052-284f-4b0c-8238-13ea3a9ddb6a-kube-api-access-fbmr8\") on node \"crc\" DevicePath \"\"" Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.957013 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/719d62e6-1ac9-497f-b889-d2ee84c621d1-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.957024 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14300052-284f-4b0c-8238-13ea3a9ddb6a-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.958053 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d06d8fca-e20d-4b6b-9466-8c612ec8cc5f-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "d06d8fca-e20d-4b6b-9466-8c612ec8cc5f" (UID: "d06d8fca-e20d-4b6b-9466-8c612ec8cc5f"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.958112 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5747934-5c30-4b86-bfd5-03a8ad37c9bd-utilities" (OuterVolumeSpecName: "utilities") pod "f5747934-5c30-4b86-bfd5-03a8ad37c9bd" (UID: "f5747934-5c30-4b86-bfd5-03a8ad37c9bd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.958452 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/edb21ff9-bfba-4ff7-a6df-54d2236a1233-utilities" (OuterVolumeSpecName: "utilities") pod "edb21ff9-bfba-4ff7-a6df-54d2236a1233" (UID: "edb21ff9-bfba-4ff7-a6df-54d2236a1233"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.958916 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/edb21ff9-bfba-4ff7-a6df-54d2236a1233-kube-api-access-p4krc" (OuterVolumeSpecName: "kube-api-access-p4krc") pod "edb21ff9-bfba-4ff7-a6df-54d2236a1233" (UID: "edb21ff9-bfba-4ff7-a6df-54d2236a1233"). InnerVolumeSpecName "kube-api-access-p4krc". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.959644 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d06d8fca-e20d-4b6b-9466-8c612ec8cc5f-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "d06d8fca-e20d-4b6b-9466-8c612ec8cc5f" (UID: "d06d8fca-e20d-4b6b-9466-8c612ec8cc5f"). 
InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.959871 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d06d8fca-e20d-4b6b-9466-8c612ec8cc5f-kube-api-access-cxk9n" (OuterVolumeSpecName: "kube-api-access-cxk9n") pod "d06d8fca-e20d-4b6b-9466-8c612ec8cc5f" (UID: "d06d8fca-e20d-4b6b-9466-8c612ec8cc5f"). InnerVolumeSpecName "kube-api-access-cxk9n". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.961261 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f5747934-5c30-4b86-bfd5-03a8ad37c9bd-kube-api-access-pm7wc" (OuterVolumeSpecName: "kube-api-access-pm7wc") pod "f5747934-5c30-4b86-bfd5-03a8ad37c9bd" (UID: "f5747934-5c30-4b86-bfd5-03a8ad37c9bd"). InnerVolumeSpecName "kube-api-access-pm7wc". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:40:45 crc kubenswrapper[4791]: I0218 00:40:45.988007 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5747934-5c30-4b86-bfd5-03a8ad37c9bd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f5747934-5c30-4b86-bfd5-03a8ad37c9bd" (UID: "f5747934-5c30-4b86-bfd5-03a8ad37c9bd"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.039570 4791 generic.go:334] "Generic (PLEG): container finished" podID="719d62e6-1ac9-497f-b889-d2ee84c621d1" containerID="a699afb9d72f8df1c7d3f49b4c78af8d27c3b1dd1edf6251fcf5d63cfd973990" exitCode=0 Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.039867 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-cp28s" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.039765 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cp28s" event={"ID":"719d62e6-1ac9-497f-b889-d2ee84c621d1","Type":"ContainerDied","Data":"a699afb9d72f8df1c7d3f49b4c78af8d27c3b1dd1edf6251fcf5d63cfd973990"} Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.040085 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cp28s" event={"ID":"719d62e6-1ac9-497f-b889-d2ee84c621d1","Type":"ContainerDied","Data":"3d54bc38584a692c514346dc9d2939839c2f85fca99ecaec01e12aafaf11098a"} Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.040209 4791 scope.go:117] "RemoveContainer" containerID="a699afb9d72f8df1c7d3f49b4c78af8d27c3b1dd1edf6251fcf5d63cfd973990" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.044726 4791 generic.go:334] "Generic (PLEG): container finished" podID="d06d8fca-e20d-4b6b-9466-8c612ec8cc5f" containerID="9c6aac33fb841e399d9a20d248df9763fb743255e256273f2ff40099d2ea1bb0" exitCode=0 Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.044889 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-plj59" event={"ID":"d06d8fca-e20d-4b6b-9466-8c612ec8cc5f","Type":"ContainerDied","Data":"9c6aac33fb841e399d9a20d248df9763fb743255e256273f2ff40099d2ea1bb0"} Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.045000 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-plj59" event={"ID":"d06d8fca-e20d-4b6b-9466-8c612ec8cc5f","Type":"ContainerDied","Data":"687fd3256454c31d6944134a5ed3ecce5a11b684076625c7b19a9c8ebcb77ae5"} Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.045149 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-plj59" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.055717 4791 generic.go:334] "Generic (PLEG): container finished" podID="14300052-284f-4b0c-8238-13ea3a9ddb6a" containerID="24290760148bc7b3981db02d2eb98d5f2e79a9166153a8e4d8de21ec2421cdb9" exitCode=0 Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.055755 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mklvx" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.055857 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mklvx" event={"ID":"14300052-284f-4b0c-8238-13ea3a9ddb6a","Type":"ContainerDied","Data":"24290760148bc7b3981db02d2eb98d5f2e79a9166153a8e4d8de21ec2421cdb9"} Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.056203 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mklvx" event={"ID":"14300052-284f-4b0c-8238-13ea3a9ddb6a","Type":"ContainerDied","Data":"1ab783d14212ef3aba4e76a514e4a75877f863004f54e9bd8c493cc46b4217c9"} Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.058403 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5747934-5c30-4b86-bfd5-03a8ad37c9bd-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.058425 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cxk9n\" (UniqueName: \"kubernetes.io/projected/d06d8fca-e20d-4b6b-9466-8c612ec8cc5f-kube-api-access-cxk9n\") on node \"crc\" DevicePath \"\"" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.058436 4791 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/d06d8fca-e20d-4b6b-9466-8c612ec8cc5f-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.058445 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pm7wc\" (UniqueName: \"kubernetes.io/projected/f5747934-5c30-4b86-bfd5-03a8ad37c9bd-kube-api-access-pm7wc\") on node \"crc\" DevicePath \"\"" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.058454 4791 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d06d8fca-e20d-4b6b-9466-8c612ec8cc5f-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.058463 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5747934-5c30-4b86-bfd5-03a8ad37c9bd-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.058471 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/edb21ff9-bfba-4ff7-a6df-54d2236a1233-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.058479 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p4krc\" (UniqueName: \"kubernetes.io/projected/edb21ff9-bfba-4ff7-a6df-54d2236a1233-kube-api-access-p4krc\") on node \"crc\" DevicePath \"\"" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.067568 4791 generic.go:334] "Generic (PLEG): container finished" podID="edb21ff9-bfba-4ff7-a6df-54d2236a1233" containerID="eb4ec9c11186c4bab8b39d6f58905a349e54d99a639b60e710e600b0fc4975b4" exitCode=0 Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.067646 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mqrgh" event={"ID":"edb21ff9-bfba-4ff7-a6df-54d2236a1233","Type":"ContainerDied","Data":"eb4ec9c11186c4bab8b39d6f58905a349e54d99a639b60e710e600b0fc4975b4"} Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 
00:40:46.067679 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mqrgh" event={"ID":"edb21ff9-bfba-4ff7-a6df-54d2236a1233","Type":"ContainerDied","Data":"a494bdf4351686227d3dcc4eb1b047146bb9742e5fcd8aa4b9f6bd9b555fdc68"} Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.067781 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mqrgh" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.070761 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-cp28s"] Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.073363 4791 generic.go:334] "Generic (PLEG): container finished" podID="f5747934-5c30-4b86-bfd5-03a8ad37c9bd" containerID="56755343ab669ca830c1e822719b7597531411a842b5b2cb39d4ae4bda6374fd" exitCode=0 Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.073491 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l5zp8" event={"ID":"f5747934-5c30-4b86-bfd5-03a8ad37c9bd","Type":"ContainerDied","Data":"56755343ab669ca830c1e822719b7597531411a842b5b2cb39d4ae4bda6374fd"} Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.073568 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l5zp8" event={"ID":"f5747934-5c30-4b86-bfd5-03a8ad37c9bd","Type":"ContainerDied","Data":"a21bd7a9974d43391d23b817771c14e4f64240abee15da220981494ba671db44"} Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.073710 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l5zp8" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.074595 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-cp28s"] Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.078762 4791 scope.go:117] "RemoveContainer" containerID="ccd3c0147f54023337d87ccbeb1eac61ea940efcc4cac3bceaeb9c9e46d59968" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.090377 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mklvx"] Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.093849 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-mklvx"] Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.102525 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-plj59"] Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.106884 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-plj59"] Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.114657 4791 scope.go:117] "RemoveContainer" containerID="cd71c50873f3c32a884edff9ff74afd2060de9a95e5dbf3f6a615bbf48e90c45" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.117565 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/edb21ff9-bfba-4ff7-a6df-54d2236a1233-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "edb21ff9-bfba-4ff7-a6df-54d2236a1233" (UID: "edb21ff9-bfba-4ff7-a6df-54d2236a1233"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.119829 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2zpl9"] Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.127225 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-l5zp8"] Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.131103 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-l5zp8"] Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.138300 4791 scope.go:117] "RemoveContainer" containerID="a699afb9d72f8df1c7d3f49b4c78af8d27c3b1dd1edf6251fcf5d63cfd973990" Feb 18 00:40:46 crc kubenswrapper[4791]: E0218 00:40:46.138798 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a699afb9d72f8df1c7d3f49b4c78af8d27c3b1dd1edf6251fcf5d63cfd973990\": container with ID starting with a699afb9d72f8df1c7d3f49b4c78af8d27c3b1dd1edf6251fcf5d63cfd973990 not found: ID does not exist" containerID="a699afb9d72f8df1c7d3f49b4c78af8d27c3b1dd1edf6251fcf5d63cfd973990" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.138904 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a699afb9d72f8df1c7d3f49b4c78af8d27c3b1dd1edf6251fcf5d63cfd973990"} err="failed to get container status \"a699afb9d72f8df1c7d3f49b4c78af8d27c3b1dd1edf6251fcf5d63cfd973990\": rpc error: code = NotFound desc = could not find container \"a699afb9d72f8df1c7d3f49b4c78af8d27c3b1dd1edf6251fcf5d63cfd973990\": container with ID starting with a699afb9d72f8df1c7d3f49b4c78af8d27c3b1dd1edf6251fcf5d63cfd973990 not found: ID does not exist" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.139126 4791 scope.go:117] "RemoveContainer" containerID="ccd3c0147f54023337d87ccbeb1eac61ea940efcc4cac3bceaeb9c9e46d59968" Feb 18 00:40:46 crc kubenswrapper[4791]: E0218 00:40:46.139475 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ccd3c0147f54023337d87ccbeb1eac61ea940efcc4cac3bceaeb9c9e46d59968\": container with ID starting with ccd3c0147f54023337d87ccbeb1eac61ea940efcc4cac3bceaeb9c9e46d59968 not found: ID does not exist" containerID="ccd3c0147f54023337d87ccbeb1eac61ea940efcc4cac3bceaeb9c9e46d59968" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.139618 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ccd3c0147f54023337d87ccbeb1eac61ea940efcc4cac3bceaeb9c9e46d59968"} err="failed to get container status \"ccd3c0147f54023337d87ccbeb1eac61ea940efcc4cac3bceaeb9c9e46d59968\": rpc error: code = NotFound desc = could not find container \"ccd3c0147f54023337d87ccbeb1eac61ea940efcc4cac3bceaeb9c9e46d59968\": container with ID starting with ccd3c0147f54023337d87ccbeb1eac61ea940efcc4cac3bceaeb9c9e46d59968 not found: ID does not exist" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.139716 4791 scope.go:117] "RemoveContainer" containerID="cd71c50873f3c32a884edff9ff74afd2060de9a95e5dbf3f6a615bbf48e90c45" Feb 18 00:40:46 crc kubenswrapper[4791]: E0218 00:40:46.139986 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cd71c50873f3c32a884edff9ff74afd2060de9a95e5dbf3f6a615bbf48e90c45\": container with ID starting with 
cd71c50873f3c32a884edff9ff74afd2060de9a95e5dbf3f6a615bbf48e90c45 not found: ID does not exist" containerID="cd71c50873f3c32a884edff9ff74afd2060de9a95e5dbf3f6a615bbf48e90c45" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.140105 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd71c50873f3c32a884edff9ff74afd2060de9a95e5dbf3f6a615bbf48e90c45"} err="failed to get container status \"cd71c50873f3c32a884edff9ff74afd2060de9a95e5dbf3f6a615bbf48e90c45\": rpc error: code = NotFound desc = could not find container \"cd71c50873f3c32a884edff9ff74afd2060de9a95e5dbf3f6a615bbf48e90c45\": container with ID starting with cd71c50873f3c32a884edff9ff74afd2060de9a95e5dbf3f6a615bbf48e90c45 not found: ID does not exist" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.140323 4791 scope.go:117] "RemoveContainer" containerID="9c6aac33fb841e399d9a20d248df9763fb743255e256273f2ff40099d2ea1bb0" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.153831 4791 scope.go:117] "RemoveContainer" containerID="c70f4e307190f827761f4cbe731248232328184bdd05f5bd8303bec180f114e3" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.159962 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/edb21ff9-bfba-4ff7-a6df-54d2236a1233-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.209381 4791 scope.go:117] "RemoveContainer" containerID="9c6aac33fb841e399d9a20d248df9763fb743255e256273f2ff40099d2ea1bb0" Feb 18 00:40:46 crc kubenswrapper[4791]: E0218 00:40:46.217897 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9c6aac33fb841e399d9a20d248df9763fb743255e256273f2ff40099d2ea1bb0\": container with ID starting with 9c6aac33fb841e399d9a20d248df9763fb743255e256273f2ff40099d2ea1bb0 not found: ID does not exist" containerID="9c6aac33fb841e399d9a20d248df9763fb743255e256273f2ff40099d2ea1bb0" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.217948 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9c6aac33fb841e399d9a20d248df9763fb743255e256273f2ff40099d2ea1bb0"} err="failed to get container status \"9c6aac33fb841e399d9a20d248df9763fb743255e256273f2ff40099d2ea1bb0\": rpc error: code = NotFound desc = could not find container \"9c6aac33fb841e399d9a20d248df9763fb743255e256273f2ff40099d2ea1bb0\": container with ID starting with 9c6aac33fb841e399d9a20d248df9763fb743255e256273f2ff40099d2ea1bb0 not found: ID does not exist" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.217974 4791 scope.go:117] "RemoveContainer" containerID="c70f4e307190f827761f4cbe731248232328184bdd05f5bd8303bec180f114e3" Feb 18 00:40:46 crc kubenswrapper[4791]: E0218 00:40:46.221337 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c70f4e307190f827761f4cbe731248232328184bdd05f5bd8303bec180f114e3\": container with ID starting with c70f4e307190f827761f4cbe731248232328184bdd05f5bd8303bec180f114e3 not found: ID does not exist" containerID="c70f4e307190f827761f4cbe731248232328184bdd05f5bd8303bec180f114e3" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.221381 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c70f4e307190f827761f4cbe731248232328184bdd05f5bd8303bec180f114e3"} err="failed to get container status 
\"c70f4e307190f827761f4cbe731248232328184bdd05f5bd8303bec180f114e3\": rpc error: code = NotFound desc = could not find container \"c70f4e307190f827761f4cbe731248232328184bdd05f5bd8303bec180f114e3\": container with ID starting with c70f4e307190f827761f4cbe731248232328184bdd05f5bd8303bec180f114e3 not found: ID does not exist" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.221409 4791 scope.go:117] "RemoveContainer" containerID="24290760148bc7b3981db02d2eb98d5f2e79a9166153a8e4d8de21ec2421cdb9" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.245806 4791 scope.go:117] "RemoveContainer" containerID="9053435eba5631c5a190c56d4f5c55bf1e5d7a72c47f8c01ec241a2803eb1863" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.266318 4791 scope.go:117] "RemoveContainer" containerID="6d93079bb0f145bd6a361c270c783b4d2f098f09086b78639ec026f056ae13a6" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.284143 4791 scope.go:117] "RemoveContainer" containerID="24290760148bc7b3981db02d2eb98d5f2e79a9166153a8e4d8de21ec2421cdb9" Feb 18 00:40:46 crc kubenswrapper[4791]: E0218 00:40:46.284530 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"24290760148bc7b3981db02d2eb98d5f2e79a9166153a8e4d8de21ec2421cdb9\": container with ID starting with 24290760148bc7b3981db02d2eb98d5f2e79a9166153a8e4d8de21ec2421cdb9 not found: ID does not exist" containerID="24290760148bc7b3981db02d2eb98d5f2e79a9166153a8e4d8de21ec2421cdb9" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.284556 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24290760148bc7b3981db02d2eb98d5f2e79a9166153a8e4d8de21ec2421cdb9"} err="failed to get container status \"24290760148bc7b3981db02d2eb98d5f2e79a9166153a8e4d8de21ec2421cdb9\": rpc error: code = NotFound desc = could not find container \"24290760148bc7b3981db02d2eb98d5f2e79a9166153a8e4d8de21ec2421cdb9\": container with ID starting with 24290760148bc7b3981db02d2eb98d5f2e79a9166153a8e4d8de21ec2421cdb9 not found: ID does not exist" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.284577 4791 scope.go:117] "RemoveContainer" containerID="9053435eba5631c5a190c56d4f5c55bf1e5d7a72c47f8c01ec241a2803eb1863" Feb 18 00:40:46 crc kubenswrapper[4791]: E0218 00:40:46.284773 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9053435eba5631c5a190c56d4f5c55bf1e5d7a72c47f8c01ec241a2803eb1863\": container with ID starting with 9053435eba5631c5a190c56d4f5c55bf1e5d7a72c47f8c01ec241a2803eb1863 not found: ID does not exist" containerID="9053435eba5631c5a190c56d4f5c55bf1e5d7a72c47f8c01ec241a2803eb1863" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.284792 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9053435eba5631c5a190c56d4f5c55bf1e5d7a72c47f8c01ec241a2803eb1863"} err="failed to get container status \"9053435eba5631c5a190c56d4f5c55bf1e5d7a72c47f8c01ec241a2803eb1863\": rpc error: code = NotFound desc = could not find container \"9053435eba5631c5a190c56d4f5c55bf1e5d7a72c47f8c01ec241a2803eb1863\": container with ID starting with 9053435eba5631c5a190c56d4f5c55bf1e5d7a72c47f8c01ec241a2803eb1863 not found: ID does not exist" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.284806 4791 scope.go:117] "RemoveContainer" containerID="6d93079bb0f145bd6a361c270c783b4d2f098f09086b78639ec026f056ae13a6" Feb 18 00:40:46 crc 
kubenswrapper[4791]: E0218 00:40:46.285000 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6d93079bb0f145bd6a361c270c783b4d2f098f09086b78639ec026f056ae13a6\": container with ID starting with 6d93079bb0f145bd6a361c270c783b4d2f098f09086b78639ec026f056ae13a6 not found: ID does not exist" containerID="6d93079bb0f145bd6a361c270c783b4d2f098f09086b78639ec026f056ae13a6" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.285024 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6d93079bb0f145bd6a361c270c783b4d2f098f09086b78639ec026f056ae13a6"} err="failed to get container status \"6d93079bb0f145bd6a361c270c783b4d2f098f09086b78639ec026f056ae13a6\": rpc error: code = NotFound desc = could not find container \"6d93079bb0f145bd6a361c270c783b4d2f098f09086b78639ec026f056ae13a6\": container with ID starting with 6d93079bb0f145bd6a361c270c783b4d2f098f09086b78639ec026f056ae13a6 not found: ID does not exist" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.285036 4791 scope.go:117] "RemoveContainer" containerID="eb4ec9c11186c4bab8b39d6f58905a349e54d99a639b60e710e600b0fc4975b4" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.295802 4791 scope.go:117] "RemoveContainer" containerID="e3fc07dc9a877c48d298afdecb8de3f2d0d401681f6f748cc55cba0bb52232e6" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.309298 4791 scope.go:117] "RemoveContainer" containerID="ac0e4f7a2017786360524f10e9274f18bfcb5fb18aa90d8f7025809faed105c7" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.320011 4791 scope.go:117] "RemoveContainer" containerID="eb4ec9c11186c4bab8b39d6f58905a349e54d99a639b60e710e600b0fc4975b4" Feb 18 00:40:46 crc kubenswrapper[4791]: E0218 00:40:46.320411 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb4ec9c11186c4bab8b39d6f58905a349e54d99a639b60e710e600b0fc4975b4\": container with ID starting with eb4ec9c11186c4bab8b39d6f58905a349e54d99a639b60e710e600b0fc4975b4 not found: ID does not exist" containerID="eb4ec9c11186c4bab8b39d6f58905a349e54d99a639b60e710e600b0fc4975b4" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.320524 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb4ec9c11186c4bab8b39d6f58905a349e54d99a639b60e710e600b0fc4975b4"} err="failed to get container status \"eb4ec9c11186c4bab8b39d6f58905a349e54d99a639b60e710e600b0fc4975b4\": rpc error: code = NotFound desc = could not find container \"eb4ec9c11186c4bab8b39d6f58905a349e54d99a639b60e710e600b0fc4975b4\": container with ID starting with eb4ec9c11186c4bab8b39d6f58905a349e54d99a639b60e710e600b0fc4975b4 not found: ID does not exist" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.320619 4791 scope.go:117] "RemoveContainer" containerID="e3fc07dc9a877c48d298afdecb8de3f2d0d401681f6f748cc55cba0bb52232e6" Feb 18 00:40:46 crc kubenswrapper[4791]: E0218 00:40:46.321038 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e3fc07dc9a877c48d298afdecb8de3f2d0d401681f6f748cc55cba0bb52232e6\": container with ID starting with e3fc07dc9a877c48d298afdecb8de3f2d0d401681f6f748cc55cba0bb52232e6 not found: ID does not exist" containerID="e3fc07dc9a877c48d298afdecb8de3f2d0d401681f6f748cc55cba0bb52232e6" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.321067 4791 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e3fc07dc9a877c48d298afdecb8de3f2d0d401681f6f748cc55cba0bb52232e6"} err="failed to get container status \"e3fc07dc9a877c48d298afdecb8de3f2d0d401681f6f748cc55cba0bb52232e6\": rpc error: code = NotFound desc = could not find container \"e3fc07dc9a877c48d298afdecb8de3f2d0d401681f6f748cc55cba0bb52232e6\": container with ID starting with e3fc07dc9a877c48d298afdecb8de3f2d0d401681f6f748cc55cba0bb52232e6 not found: ID does not exist" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.321088 4791 scope.go:117] "RemoveContainer" containerID="ac0e4f7a2017786360524f10e9274f18bfcb5fb18aa90d8f7025809faed105c7" Feb 18 00:40:46 crc kubenswrapper[4791]: E0218 00:40:46.321441 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac0e4f7a2017786360524f10e9274f18bfcb5fb18aa90d8f7025809faed105c7\": container with ID starting with ac0e4f7a2017786360524f10e9274f18bfcb5fb18aa90d8f7025809faed105c7 not found: ID does not exist" containerID="ac0e4f7a2017786360524f10e9274f18bfcb5fb18aa90d8f7025809faed105c7" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.321525 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac0e4f7a2017786360524f10e9274f18bfcb5fb18aa90d8f7025809faed105c7"} err="failed to get container status \"ac0e4f7a2017786360524f10e9274f18bfcb5fb18aa90d8f7025809faed105c7\": rpc error: code = NotFound desc = could not find container \"ac0e4f7a2017786360524f10e9274f18bfcb5fb18aa90d8f7025809faed105c7\": container with ID starting with ac0e4f7a2017786360524f10e9274f18bfcb5fb18aa90d8f7025809faed105c7 not found: ID does not exist" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.321599 4791 scope.go:117] "RemoveContainer" containerID="56755343ab669ca830c1e822719b7597531411a842b5b2cb39d4ae4bda6374fd" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.334467 4791 scope.go:117] "RemoveContainer" containerID="57c294bc09c7f60d4d87138e2aeb5c3b2ce403e088f8a1d5c4f1ec61532966eb" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.353942 4791 scope.go:117] "RemoveContainer" containerID="204d99a0637bdf2318b878a8276638894586673a3c8b1647aa677c102e48962d" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.365571 4791 scope.go:117] "RemoveContainer" containerID="56755343ab669ca830c1e822719b7597531411a842b5b2cb39d4ae4bda6374fd" Feb 18 00:40:46 crc kubenswrapper[4791]: E0218 00:40:46.365972 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"56755343ab669ca830c1e822719b7597531411a842b5b2cb39d4ae4bda6374fd\": container with ID starting with 56755343ab669ca830c1e822719b7597531411a842b5b2cb39d4ae4bda6374fd not found: ID does not exist" containerID="56755343ab669ca830c1e822719b7597531411a842b5b2cb39d4ae4bda6374fd" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.366002 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56755343ab669ca830c1e822719b7597531411a842b5b2cb39d4ae4bda6374fd"} err="failed to get container status \"56755343ab669ca830c1e822719b7597531411a842b5b2cb39d4ae4bda6374fd\": rpc error: code = NotFound desc = could not find container \"56755343ab669ca830c1e822719b7597531411a842b5b2cb39d4ae4bda6374fd\": container with ID starting with 56755343ab669ca830c1e822719b7597531411a842b5b2cb39d4ae4bda6374fd not found: ID does not exist" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 
00:40:46.366025 4791 scope.go:117] "RemoveContainer" containerID="57c294bc09c7f60d4d87138e2aeb5c3b2ce403e088f8a1d5c4f1ec61532966eb" Feb 18 00:40:46 crc kubenswrapper[4791]: E0218 00:40:46.366591 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"57c294bc09c7f60d4d87138e2aeb5c3b2ce403e088f8a1d5c4f1ec61532966eb\": container with ID starting with 57c294bc09c7f60d4d87138e2aeb5c3b2ce403e088f8a1d5c4f1ec61532966eb not found: ID does not exist" containerID="57c294bc09c7f60d4d87138e2aeb5c3b2ce403e088f8a1d5c4f1ec61532966eb" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.366728 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"57c294bc09c7f60d4d87138e2aeb5c3b2ce403e088f8a1d5c4f1ec61532966eb"} err="failed to get container status \"57c294bc09c7f60d4d87138e2aeb5c3b2ce403e088f8a1d5c4f1ec61532966eb\": rpc error: code = NotFound desc = could not find container \"57c294bc09c7f60d4d87138e2aeb5c3b2ce403e088f8a1d5c4f1ec61532966eb\": container with ID starting with 57c294bc09c7f60d4d87138e2aeb5c3b2ce403e088f8a1d5c4f1ec61532966eb not found: ID does not exist" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.366843 4791 scope.go:117] "RemoveContainer" containerID="204d99a0637bdf2318b878a8276638894586673a3c8b1647aa677c102e48962d" Feb 18 00:40:46 crc kubenswrapper[4791]: E0218 00:40:46.367256 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"204d99a0637bdf2318b878a8276638894586673a3c8b1647aa677c102e48962d\": container with ID starting with 204d99a0637bdf2318b878a8276638894586673a3c8b1647aa677c102e48962d not found: ID does not exist" containerID="204d99a0637bdf2318b878a8276638894586673a3c8b1647aa677c102e48962d" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.367292 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"204d99a0637bdf2318b878a8276638894586673a3c8b1647aa677c102e48962d"} err="failed to get container status \"204d99a0637bdf2318b878a8276638894586673a3c8b1647aa677c102e48962d\": rpc error: code = NotFound desc = could not find container \"204d99a0637bdf2318b878a8276638894586673a3c8b1647aa677c102e48962d\": container with ID starting with 204d99a0637bdf2318b878a8276638894586673a3c8b1647aa677c102e48962d not found: ID does not exist" Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.394703 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mqrgh"] Feb 18 00:40:46 crc kubenswrapper[4791]: I0218 00:40:46.394892 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-mqrgh"] Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.069802 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="14300052-284f-4b0c-8238-13ea3a9ddb6a" path="/var/lib/kubelet/pods/14300052-284f-4b0c-8238-13ea3a9ddb6a/volumes" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.071414 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="719d62e6-1ac9-497f-b889-d2ee84c621d1" path="/var/lib/kubelet/pods/719d62e6-1ac9-497f-b889-d2ee84c621d1/volumes" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.072831 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d06d8fca-e20d-4b6b-9466-8c612ec8cc5f" path="/var/lib/kubelet/pods/d06d8fca-e20d-4b6b-9466-8c612ec8cc5f/volumes" Feb 18 00:40:47 crc 
kubenswrapper[4791]: I0218 00:40:47.074887 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="edb21ff9-bfba-4ff7-a6df-54d2236a1233" path="/var/lib/kubelet/pods/edb21ff9-bfba-4ff7-a6df-54d2236a1233/volumes" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.076409 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f5747934-5c30-4b86-bfd5-03a8ad37c9bd" path="/var/lib/kubelet/pods/f5747934-5c30-4b86-bfd5-03a8ad37c9bd/volumes" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.087154 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-2zpl9" event={"ID":"2b61f101-7b72-4308-9c07-9d2e441f333c","Type":"ContainerStarted","Data":"e04ab24de9d659b0ec247766bb7be47905c9f0b27def742ec7689bdb19380c93"} Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.087219 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-2zpl9" event={"ID":"2b61f101-7b72-4308-9c07-9d2e441f333c","Type":"ContainerStarted","Data":"8687f770cd923fbb34c2b29ca39370b596bb690ba025debc48d080e76bef1fdb"} Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.087540 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-2zpl9" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.091235 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-2zpl9" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.105255 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-2zpl9" podStartSLOduration=2.105230827 podStartE2EDuration="2.105230827s" podCreationTimestamp="2026-02-18 00:40:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:40:47.103480562 +0000 UTC m=+388.671493742" watchObservedRunningTime="2026-02-18 00:40:47.105230827 +0000 UTC m=+388.673244007" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.464279 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-hhjg7"] Feb 18 00:40:47 crc kubenswrapper[4791]: E0218 00:40:47.465599 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14300052-284f-4b0c-8238-13ea3a9ddb6a" containerName="extract-utilities" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.465623 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="14300052-284f-4b0c-8238-13ea3a9ddb6a" containerName="extract-utilities" Feb 18 00:40:47 crc kubenswrapper[4791]: E0218 00:40:47.465635 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="719d62e6-1ac9-497f-b889-d2ee84c621d1" containerName="extract-utilities" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.465642 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="719d62e6-1ac9-497f-b889-d2ee84c621d1" containerName="extract-utilities" Feb 18 00:40:47 crc kubenswrapper[4791]: E0218 00:40:47.465655 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14300052-284f-4b0c-8238-13ea3a9ddb6a" containerName="extract-content" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.465663 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="14300052-284f-4b0c-8238-13ea3a9ddb6a" containerName="extract-content" Feb 18 00:40:47 crc kubenswrapper[4791]: E0218 
00:40:47.465671 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="edb21ff9-bfba-4ff7-a6df-54d2236a1233" containerName="registry-server" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.465681 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="edb21ff9-bfba-4ff7-a6df-54d2236a1233" containerName="registry-server" Feb 18 00:40:47 crc kubenswrapper[4791]: E0218 00:40:47.465696 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5747934-5c30-4b86-bfd5-03a8ad37c9bd" containerName="extract-utilities" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.465704 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5747934-5c30-4b86-bfd5-03a8ad37c9bd" containerName="extract-utilities" Feb 18 00:40:47 crc kubenswrapper[4791]: E0218 00:40:47.465717 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d06d8fca-e20d-4b6b-9466-8c612ec8cc5f" containerName="marketplace-operator" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.465725 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="d06d8fca-e20d-4b6b-9466-8c612ec8cc5f" containerName="marketplace-operator" Feb 18 00:40:47 crc kubenswrapper[4791]: E0218 00:40:47.465736 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="719d62e6-1ac9-497f-b889-d2ee84c621d1" containerName="extract-content" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.465743 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="719d62e6-1ac9-497f-b889-d2ee84c621d1" containerName="extract-content" Feb 18 00:40:47 crc kubenswrapper[4791]: E0218 00:40:47.465751 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="edb21ff9-bfba-4ff7-a6df-54d2236a1233" containerName="extract-content" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.465758 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="edb21ff9-bfba-4ff7-a6df-54d2236a1233" containerName="extract-content" Feb 18 00:40:47 crc kubenswrapper[4791]: E0218 00:40:47.465769 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5747934-5c30-4b86-bfd5-03a8ad37c9bd" containerName="registry-server" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.465776 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5747934-5c30-4b86-bfd5-03a8ad37c9bd" containerName="registry-server" Feb 18 00:40:47 crc kubenswrapper[4791]: E0218 00:40:47.465787 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d06d8fca-e20d-4b6b-9466-8c612ec8cc5f" containerName="marketplace-operator" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.465794 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="d06d8fca-e20d-4b6b-9466-8c612ec8cc5f" containerName="marketplace-operator" Feb 18 00:40:47 crc kubenswrapper[4791]: E0218 00:40:47.465806 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5747934-5c30-4b86-bfd5-03a8ad37c9bd" containerName="extract-content" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.465814 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5747934-5c30-4b86-bfd5-03a8ad37c9bd" containerName="extract-content" Feb 18 00:40:47 crc kubenswrapper[4791]: E0218 00:40:47.465823 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="719d62e6-1ac9-497f-b889-d2ee84c621d1" containerName="registry-server" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.465830 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="719d62e6-1ac9-497f-b889-d2ee84c621d1" containerName="registry-server" Feb 
18 00:40:47 crc kubenswrapper[4791]: E0218 00:40:47.465844 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14300052-284f-4b0c-8238-13ea3a9ddb6a" containerName="registry-server" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.465851 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="14300052-284f-4b0c-8238-13ea3a9ddb6a" containerName="registry-server" Feb 18 00:40:47 crc kubenswrapper[4791]: E0218 00:40:47.465860 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="edb21ff9-bfba-4ff7-a6df-54d2236a1233" containerName="extract-utilities" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.465868 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="edb21ff9-bfba-4ff7-a6df-54d2236a1233" containerName="extract-utilities" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.465982 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="719d62e6-1ac9-497f-b889-d2ee84c621d1" containerName="registry-server" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.465999 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5747934-5c30-4b86-bfd5-03a8ad37c9bd" containerName="registry-server" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.466007 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="14300052-284f-4b0c-8238-13ea3a9ddb6a" containerName="registry-server" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.466017 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="edb21ff9-bfba-4ff7-a6df-54d2236a1233" containerName="registry-server" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.466028 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="d06d8fca-e20d-4b6b-9466-8c612ec8cc5f" containerName="marketplace-operator" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.466035 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="d06d8fca-e20d-4b6b-9466-8c612ec8cc5f" containerName="marketplace-operator" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.466932 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hhjg7" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.470577 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.477685 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hhjg7"] Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.577902 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mfkbp\" (UniqueName: \"kubernetes.io/projected/cd6b21cf-4c8d-4f85-8587-3184b3063b8a-kube-api-access-mfkbp\") pod \"redhat-marketplace-hhjg7\" (UID: \"cd6b21cf-4c8d-4f85-8587-3184b3063b8a\") " pod="openshift-marketplace/redhat-marketplace-hhjg7" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.577952 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd6b21cf-4c8d-4f85-8587-3184b3063b8a-utilities\") pod \"redhat-marketplace-hhjg7\" (UID: \"cd6b21cf-4c8d-4f85-8587-3184b3063b8a\") " pod="openshift-marketplace/redhat-marketplace-hhjg7" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.578260 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd6b21cf-4c8d-4f85-8587-3184b3063b8a-catalog-content\") pod \"redhat-marketplace-hhjg7\" (UID: \"cd6b21cf-4c8d-4f85-8587-3184b3063b8a\") " pod="openshift-marketplace/redhat-marketplace-hhjg7" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.662481 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-vsm7z"] Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.664922 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-vsm7z" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.667723 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.679173 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd6b21cf-4c8d-4f85-8587-3184b3063b8a-utilities\") pod \"redhat-marketplace-hhjg7\" (UID: \"cd6b21cf-4c8d-4f85-8587-3184b3063b8a\") " pod="openshift-marketplace/redhat-marketplace-hhjg7" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.679241 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd6b21cf-4c8d-4f85-8587-3184b3063b8a-catalog-content\") pod \"redhat-marketplace-hhjg7\" (UID: \"cd6b21cf-4c8d-4f85-8587-3184b3063b8a\") " pod="openshift-marketplace/redhat-marketplace-hhjg7" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.679319 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mfkbp\" (UniqueName: \"kubernetes.io/projected/cd6b21cf-4c8d-4f85-8587-3184b3063b8a-kube-api-access-mfkbp\") pod \"redhat-marketplace-hhjg7\" (UID: \"cd6b21cf-4c8d-4f85-8587-3184b3063b8a\") " pod="openshift-marketplace/redhat-marketplace-hhjg7" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.679810 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd6b21cf-4c8d-4f85-8587-3184b3063b8a-utilities\") pod \"redhat-marketplace-hhjg7\" (UID: \"cd6b21cf-4c8d-4f85-8587-3184b3063b8a\") " pod="openshift-marketplace/redhat-marketplace-hhjg7" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.680025 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd6b21cf-4c8d-4f85-8587-3184b3063b8a-catalog-content\") pod \"redhat-marketplace-hhjg7\" (UID: \"cd6b21cf-4c8d-4f85-8587-3184b3063b8a\") " pod="openshift-marketplace/redhat-marketplace-hhjg7" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.698050 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vsm7z"] Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.711704 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mfkbp\" (UniqueName: \"kubernetes.io/projected/cd6b21cf-4c8d-4f85-8587-3184b3063b8a-kube-api-access-mfkbp\") pod \"redhat-marketplace-hhjg7\" (UID: \"cd6b21cf-4c8d-4f85-8587-3184b3063b8a\") " pod="openshift-marketplace/redhat-marketplace-hhjg7" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.780290 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zz96n\" (UniqueName: \"kubernetes.io/projected/d3f35587-5ab1-46ba-895a-58f199304ab4-kube-api-access-zz96n\") pod \"certified-operators-vsm7z\" (UID: \"d3f35587-5ab1-46ba-895a-58f199304ab4\") " pod="openshift-marketplace/certified-operators-vsm7z" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.780699 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3f35587-5ab1-46ba-895a-58f199304ab4-catalog-content\") pod \"certified-operators-vsm7z\" (UID: 
\"d3f35587-5ab1-46ba-895a-58f199304ab4\") " pod="openshift-marketplace/certified-operators-vsm7z" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.780838 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3f35587-5ab1-46ba-895a-58f199304ab4-utilities\") pod \"certified-operators-vsm7z\" (UID: \"d3f35587-5ab1-46ba-895a-58f199304ab4\") " pod="openshift-marketplace/certified-operators-vsm7z" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.784030 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hhjg7" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.882493 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3f35587-5ab1-46ba-895a-58f199304ab4-catalog-content\") pod \"certified-operators-vsm7z\" (UID: \"d3f35587-5ab1-46ba-895a-58f199304ab4\") " pod="openshift-marketplace/certified-operators-vsm7z" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.883031 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3f35587-5ab1-46ba-895a-58f199304ab4-utilities\") pod \"certified-operators-vsm7z\" (UID: \"d3f35587-5ab1-46ba-895a-58f199304ab4\") " pod="openshift-marketplace/certified-operators-vsm7z" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.883087 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zz96n\" (UniqueName: \"kubernetes.io/projected/d3f35587-5ab1-46ba-895a-58f199304ab4-kube-api-access-zz96n\") pod \"certified-operators-vsm7z\" (UID: \"d3f35587-5ab1-46ba-895a-58f199304ab4\") " pod="openshift-marketplace/certified-operators-vsm7z" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.883179 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3f35587-5ab1-46ba-895a-58f199304ab4-catalog-content\") pod \"certified-operators-vsm7z\" (UID: \"d3f35587-5ab1-46ba-895a-58f199304ab4\") " pod="openshift-marketplace/certified-operators-vsm7z" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.883476 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3f35587-5ab1-46ba-895a-58f199304ab4-utilities\") pod \"certified-operators-vsm7z\" (UID: \"d3f35587-5ab1-46ba-895a-58f199304ab4\") " pod="openshift-marketplace/certified-operators-vsm7z" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.899952 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zz96n\" (UniqueName: \"kubernetes.io/projected/d3f35587-5ab1-46ba-895a-58f199304ab4-kube-api-access-zz96n\") pod \"certified-operators-vsm7z\" (UID: \"d3f35587-5ab1-46ba-895a-58f199304ab4\") " pod="openshift-marketplace/certified-operators-vsm7z" Feb 18 00:40:47 crc kubenswrapper[4791]: I0218 00:40:47.983609 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-vsm7z" Feb 18 00:40:48 crc kubenswrapper[4791]: I0218 00:40:48.184250 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vsm7z"] Feb 18 00:40:48 crc kubenswrapper[4791]: I0218 00:40:48.186706 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hhjg7"] Feb 18 00:40:48 crc kubenswrapper[4791]: W0218 00:40:48.193049 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcd6b21cf_4c8d_4f85_8587_3184b3063b8a.slice/crio-61e987055632ef65cde1aeac1b82639a0fbb80caff43e583fd1fe5b573792316 WatchSource:0}: Error finding container 61e987055632ef65cde1aeac1b82639a0fbb80caff43e583fd1fe5b573792316: Status 404 returned error can't find the container with id 61e987055632ef65cde1aeac1b82639a0fbb80caff43e583fd1fe5b573792316 Feb 18 00:40:49 crc kubenswrapper[4791]: I0218 00:40:49.109298 4791 generic.go:334] "Generic (PLEG): container finished" podID="d3f35587-5ab1-46ba-895a-58f199304ab4" containerID="a88ceb4b40b58bcbeb42d3e1547d59e7bf2aeb87868c0282b3658984e63e2def" exitCode=0 Feb 18 00:40:49 crc kubenswrapper[4791]: I0218 00:40:49.109346 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vsm7z" event={"ID":"d3f35587-5ab1-46ba-895a-58f199304ab4","Type":"ContainerDied","Data":"a88ceb4b40b58bcbeb42d3e1547d59e7bf2aeb87868c0282b3658984e63e2def"} Feb 18 00:40:49 crc kubenswrapper[4791]: I0218 00:40:49.109659 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vsm7z" event={"ID":"d3f35587-5ab1-46ba-895a-58f199304ab4","Type":"ContainerStarted","Data":"4879bca979ad3f0cb62966821bee72c11fc48c5c22321d3e8b0b7eaadf030f2c"} Feb 18 00:40:49 crc kubenswrapper[4791]: I0218 00:40:49.112907 4791 generic.go:334] "Generic (PLEG): container finished" podID="cd6b21cf-4c8d-4f85-8587-3184b3063b8a" containerID="edff5ae37af10197d91b53c3dee3927a09cb6d35eae950bdecd2743a4b07e273" exitCode=0 Feb 18 00:40:49 crc kubenswrapper[4791]: I0218 00:40:49.113075 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hhjg7" event={"ID":"cd6b21cf-4c8d-4f85-8587-3184b3063b8a","Type":"ContainerDied","Data":"edff5ae37af10197d91b53c3dee3927a09cb6d35eae950bdecd2743a4b07e273"} Feb 18 00:40:49 crc kubenswrapper[4791]: I0218 00:40:49.113103 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hhjg7" event={"ID":"cd6b21cf-4c8d-4f85-8587-3184b3063b8a","Type":"ContainerStarted","Data":"61e987055632ef65cde1aeac1b82639a0fbb80caff43e583fd1fe5b573792316"} Feb 18 00:40:49 crc kubenswrapper[4791]: I0218 00:40:49.867548 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-rt86r"] Feb 18 00:40:49 crc kubenswrapper[4791]: I0218 00:40:49.868933 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rt86r" Feb 18 00:40:49 crc kubenswrapper[4791]: I0218 00:40:49.871653 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Feb 18 00:40:49 crc kubenswrapper[4791]: I0218 00:40:49.872934 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rt86r"] Feb 18 00:40:50 crc kubenswrapper[4791]: I0218 00:40:50.010240 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc9c385f-8293-43dc-902b-89632cda2af9-utilities\") pod \"community-operators-rt86r\" (UID: \"dc9c385f-8293-43dc-902b-89632cda2af9\") " pod="openshift-marketplace/community-operators-rt86r" Feb 18 00:40:50 crc kubenswrapper[4791]: I0218 00:40:50.010309 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc9c385f-8293-43dc-902b-89632cda2af9-catalog-content\") pod \"community-operators-rt86r\" (UID: \"dc9c385f-8293-43dc-902b-89632cda2af9\") " pod="openshift-marketplace/community-operators-rt86r" Feb 18 00:40:50 crc kubenswrapper[4791]: I0218 00:40:50.010351 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-drp2s\" (UniqueName: \"kubernetes.io/projected/dc9c385f-8293-43dc-902b-89632cda2af9-kube-api-access-drp2s\") pod \"community-operators-rt86r\" (UID: \"dc9c385f-8293-43dc-902b-89632cda2af9\") " pod="openshift-marketplace/community-operators-rt86r" Feb 18 00:40:50 crc kubenswrapper[4791]: I0218 00:40:50.077813 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-gf4b2"] Feb 18 00:40:50 crc kubenswrapper[4791]: I0218 00:40:50.081090 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-gf4b2" Feb 18 00:40:50 crc kubenswrapper[4791]: I0218 00:40:50.083865 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Feb 18 00:40:50 crc kubenswrapper[4791]: I0218 00:40:50.091176 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gf4b2"] Feb 18 00:40:50 crc kubenswrapper[4791]: I0218 00:40:50.111420 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-drp2s\" (UniqueName: \"kubernetes.io/projected/dc9c385f-8293-43dc-902b-89632cda2af9-kube-api-access-drp2s\") pod \"community-operators-rt86r\" (UID: \"dc9c385f-8293-43dc-902b-89632cda2af9\") " pod="openshift-marketplace/community-operators-rt86r" Feb 18 00:40:50 crc kubenswrapper[4791]: I0218 00:40:50.111529 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc9c385f-8293-43dc-902b-89632cda2af9-utilities\") pod \"community-operators-rt86r\" (UID: \"dc9c385f-8293-43dc-902b-89632cda2af9\") " pod="openshift-marketplace/community-operators-rt86r" Feb 18 00:40:50 crc kubenswrapper[4791]: I0218 00:40:50.111605 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc9c385f-8293-43dc-902b-89632cda2af9-catalog-content\") pod \"community-operators-rt86r\" (UID: \"dc9c385f-8293-43dc-902b-89632cda2af9\") " pod="openshift-marketplace/community-operators-rt86r" Feb 18 00:40:50 crc kubenswrapper[4791]: I0218 00:40:50.112558 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc9c385f-8293-43dc-902b-89632cda2af9-utilities\") pod \"community-operators-rt86r\" (UID: \"dc9c385f-8293-43dc-902b-89632cda2af9\") " pod="openshift-marketplace/community-operators-rt86r" Feb 18 00:40:50 crc kubenswrapper[4791]: I0218 00:40:50.112861 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc9c385f-8293-43dc-902b-89632cda2af9-catalog-content\") pod \"community-operators-rt86r\" (UID: \"dc9c385f-8293-43dc-902b-89632cda2af9\") " pod="openshift-marketplace/community-operators-rt86r" Feb 18 00:40:50 crc kubenswrapper[4791]: I0218 00:40:50.118676 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vsm7z" event={"ID":"d3f35587-5ab1-46ba-895a-58f199304ab4","Type":"ContainerStarted","Data":"a1df5982e9c78a0a9551d9a21d3d688dd3084d4b626befc456bb2ba99c902a8f"} Feb 18 00:40:50 crc kubenswrapper[4791]: I0218 00:40:50.120636 4791 generic.go:334] "Generic (PLEG): container finished" podID="cd6b21cf-4c8d-4f85-8587-3184b3063b8a" containerID="6d4a9f9fd8007e7d3d1de5155a7e94c1177cca01437bfaea09c7bac623d930fd" exitCode=0 Feb 18 00:40:50 crc kubenswrapper[4791]: I0218 00:40:50.120718 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hhjg7" event={"ID":"cd6b21cf-4c8d-4f85-8587-3184b3063b8a","Type":"ContainerDied","Data":"6d4a9f9fd8007e7d3d1de5155a7e94c1177cca01437bfaea09c7bac623d930fd"} Feb 18 00:40:50 crc kubenswrapper[4791]: I0218 00:40:50.134029 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-drp2s\" (UniqueName: \"kubernetes.io/projected/dc9c385f-8293-43dc-902b-89632cda2af9-kube-api-access-drp2s\") 
pod \"community-operators-rt86r\" (UID: \"dc9c385f-8293-43dc-902b-89632cda2af9\") " pod="openshift-marketplace/community-operators-rt86r" Feb 18 00:40:50 crc kubenswrapper[4791]: I0218 00:40:50.192774 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rt86r" Feb 18 00:40:50 crc kubenswrapper[4791]: I0218 00:40:50.212804 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h5x9s\" (UniqueName: \"kubernetes.io/projected/498618d7-e10c-442d-9150-7cd04846d4d5-kube-api-access-h5x9s\") pod \"redhat-operators-gf4b2\" (UID: \"498618d7-e10c-442d-9150-7cd04846d4d5\") " pod="openshift-marketplace/redhat-operators-gf4b2" Feb 18 00:40:50 crc kubenswrapper[4791]: I0218 00:40:50.212874 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/498618d7-e10c-442d-9150-7cd04846d4d5-utilities\") pod \"redhat-operators-gf4b2\" (UID: \"498618d7-e10c-442d-9150-7cd04846d4d5\") " pod="openshift-marketplace/redhat-operators-gf4b2" Feb 18 00:40:50 crc kubenswrapper[4791]: I0218 00:40:50.212903 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/498618d7-e10c-442d-9150-7cd04846d4d5-catalog-content\") pod \"redhat-operators-gf4b2\" (UID: \"498618d7-e10c-442d-9150-7cd04846d4d5\") " pod="openshift-marketplace/redhat-operators-gf4b2" Feb 18 00:40:50 crc kubenswrapper[4791]: I0218 00:40:50.314029 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h5x9s\" (UniqueName: \"kubernetes.io/projected/498618d7-e10c-442d-9150-7cd04846d4d5-kube-api-access-h5x9s\") pod \"redhat-operators-gf4b2\" (UID: \"498618d7-e10c-442d-9150-7cd04846d4d5\") " pod="openshift-marketplace/redhat-operators-gf4b2" Feb 18 00:40:50 crc kubenswrapper[4791]: I0218 00:40:50.314424 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/498618d7-e10c-442d-9150-7cd04846d4d5-utilities\") pod \"redhat-operators-gf4b2\" (UID: \"498618d7-e10c-442d-9150-7cd04846d4d5\") " pod="openshift-marketplace/redhat-operators-gf4b2" Feb 18 00:40:50 crc kubenswrapper[4791]: I0218 00:40:50.314451 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/498618d7-e10c-442d-9150-7cd04846d4d5-catalog-content\") pod \"redhat-operators-gf4b2\" (UID: \"498618d7-e10c-442d-9150-7cd04846d4d5\") " pod="openshift-marketplace/redhat-operators-gf4b2" Feb 18 00:40:50 crc kubenswrapper[4791]: I0218 00:40:50.314860 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/498618d7-e10c-442d-9150-7cd04846d4d5-utilities\") pod \"redhat-operators-gf4b2\" (UID: \"498618d7-e10c-442d-9150-7cd04846d4d5\") " pod="openshift-marketplace/redhat-operators-gf4b2" Feb 18 00:40:50 crc kubenswrapper[4791]: I0218 00:40:50.315082 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/498618d7-e10c-442d-9150-7cd04846d4d5-catalog-content\") pod \"redhat-operators-gf4b2\" (UID: \"498618d7-e10c-442d-9150-7cd04846d4d5\") " pod="openshift-marketplace/redhat-operators-gf4b2" Feb 18 00:40:50 crc kubenswrapper[4791]: I0218 00:40:50.331439 
4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h5x9s\" (UniqueName: \"kubernetes.io/projected/498618d7-e10c-442d-9150-7cd04846d4d5-kube-api-access-h5x9s\") pod \"redhat-operators-gf4b2\" (UID: \"498618d7-e10c-442d-9150-7cd04846d4d5\") " pod="openshift-marketplace/redhat-operators-gf4b2" Feb 18 00:40:50 crc kubenswrapper[4791]: I0218 00:40:50.362957 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rt86r"] Feb 18 00:40:50 crc kubenswrapper[4791]: W0218 00:40:50.370783 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddc9c385f_8293_43dc_902b_89632cda2af9.slice/crio-cb038567eaaedbb3730c621250847789e096aac4618bc0d5ea17d8b67fda6e3a WatchSource:0}: Error finding container cb038567eaaedbb3730c621250847789e096aac4618bc0d5ea17d8b67fda6e3a: Status 404 returned error can't find the container with id cb038567eaaedbb3730c621250847789e096aac4618bc0d5ea17d8b67fda6e3a Feb 18 00:40:50 crc kubenswrapper[4791]: I0218 00:40:50.396305 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gf4b2" Feb 18 00:40:50 crc kubenswrapper[4791]: I0218 00:40:50.771404 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gf4b2"] Feb 18 00:40:50 crc kubenswrapper[4791]: W0218 00:40:50.777716 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod498618d7_e10c_442d_9150_7cd04846d4d5.slice/crio-2d70ed422c4fa749489f34ae473595bcd9f6b86ec092ffe0a7f575c9e2e86b53 WatchSource:0}: Error finding container 2d70ed422c4fa749489f34ae473595bcd9f6b86ec092ffe0a7f575c9e2e86b53: Status 404 returned error can't find the container with id 2d70ed422c4fa749489f34ae473595bcd9f6b86ec092ffe0a7f575c9e2e86b53 Feb 18 00:40:51 crc kubenswrapper[4791]: I0218 00:40:51.129811 4791 generic.go:334] "Generic (PLEG): container finished" podID="dc9c385f-8293-43dc-902b-89632cda2af9" containerID="ccdb4842ecc160f46564ca299721a5106726a0f4ba35bef5f4d38a28e1f6d883" exitCode=0 Feb 18 00:40:51 crc kubenswrapper[4791]: I0218 00:40:51.130334 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rt86r" event={"ID":"dc9c385f-8293-43dc-902b-89632cda2af9","Type":"ContainerDied","Data":"ccdb4842ecc160f46564ca299721a5106726a0f4ba35bef5f4d38a28e1f6d883"} Feb 18 00:40:51 crc kubenswrapper[4791]: I0218 00:40:51.130900 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rt86r" event={"ID":"dc9c385f-8293-43dc-902b-89632cda2af9","Type":"ContainerStarted","Data":"cb038567eaaedbb3730c621250847789e096aac4618bc0d5ea17d8b67fda6e3a"} Feb 18 00:40:51 crc kubenswrapper[4791]: I0218 00:40:51.135004 4791 generic.go:334] "Generic (PLEG): container finished" podID="d3f35587-5ab1-46ba-895a-58f199304ab4" containerID="a1df5982e9c78a0a9551d9a21d3d688dd3084d4b626befc456bb2ba99c902a8f" exitCode=0 Feb 18 00:40:51 crc kubenswrapper[4791]: I0218 00:40:51.135058 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vsm7z" event={"ID":"d3f35587-5ab1-46ba-895a-58f199304ab4","Type":"ContainerDied","Data":"a1df5982e9c78a0a9551d9a21d3d688dd3084d4b626befc456bb2ba99c902a8f"} Feb 18 00:40:51 crc kubenswrapper[4791]: I0218 00:40:51.138773 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-hhjg7" event={"ID":"cd6b21cf-4c8d-4f85-8587-3184b3063b8a","Type":"ContainerStarted","Data":"bb39fba42031b1d03e861edaf5039d711bec028de26c359692748014849afeaa"} Feb 18 00:40:51 crc kubenswrapper[4791]: I0218 00:40:51.140787 4791 generic.go:334] "Generic (PLEG): container finished" podID="498618d7-e10c-442d-9150-7cd04846d4d5" containerID="52d9556d414f527b96729154806b6a2a58da691a278592cab3a88f1c648ba1bc" exitCode=0 Feb 18 00:40:51 crc kubenswrapper[4791]: I0218 00:40:51.140814 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gf4b2" event={"ID":"498618d7-e10c-442d-9150-7cd04846d4d5","Type":"ContainerDied","Data":"52d9556d414f527b96729154806b6a2a58da691a278592cab3a88f1c648ba1bc"} Feb 18 00:40:51 crc kubenswrapper[4791]: I0218 00:40:51.140833 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gf4b2" event={"ID":"498618d7-e10c-442d-9150-7cd04846d4d5","Type":"ContainerStarted","Data":"2d70ed422c4fa749489f34ae473595bcd9f6b86ec092ffe0a7f575c9e2e86b53"} Feb 18 00:40:51 crc kubenswrapper[4791]: I0218 00:40:51.215026 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-hhjg7" podStartSLOduration=2.808177639 podStartE2EDuration="4.215003607s" podCreationTimestamp="2026-02-18 00:40:47 +0000 UTC" firstStartedPulling="2026-02-18 00:40:49.115136034 +0000 UTC m=+390.683149204" lastFinishedPulling="2026-02-18 00:40:50.521962002 +0000 UTC m=+392.089975172" observedRunningTime="2026-02-18 00:40:51.209056969 +0000 UTC m=+392.777070149" watchObservedRunningTime="2026-02-18 00:40:51.215003607 +0000 UTC m=+392.783016777" Feb 18 00:40:52 crc kubenswrapper[4791]: I0218 00:40:52.151654 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gf4b2" event={"ID":"498618d7-e10c-442d-9150-7cd04846d4d5","Type":"ContainerStarted","Data":"1152b808b5a37f6c6eed37933a2ed2dc4601b692c27e7103f151d5bf6d938675"} Feb 18 00:40:52 crc kubenswrapper[4791]: I0218 00:40:52.154298 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rt86r" event={"ID":"dc9c385f-8293-43dc-902b-89632cda2af9","Type":"ContainerStarted","Data":"bbf72b2c9f12dee74f3fcede599f6f8a88a35b9f83d934c686c540c4b3cccc5f"} Feb 18 00:40:52 crc kubenswrapper[4791]: I0218 00:40:52.159329 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vsm7z" event={"ID":"d3f35587-5ab1-46ba-895a-58f199304ab4","Type":"ContainerStarted","Data":"34e18a1ac2fc28be1fbe59aa7882149f6b9928774520c8e1b68e2d13b3dc602c"} Feb 18 00:40:52 crc kubenswrapper[4791]: I0218 00:40:52.200746 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-vsm7z" podStartSLOduration=2.787133583 podStartE2EDuration="5.200729054s" podCreationTimestamp="2026-02-18 00:40:47 +0000 UTC" firstStartedPulling="2026-02-18 00:40:49.11250093 +0000 UTC m=+390.680514100" lastFinishedPulling="2026-02-18 00:40:51.526096401 +0000 UTC m=+393.094109571" observedRunningTime="2026-02-18 00:40:52.196113888 +0000 UTC m=+393.764127058" watchObservedRunningTime="2026-02-18 00:40:52.200729054 +0000 UTC m=+393.768742224" Feb 18 00:40:53 crc kubenswrapper[4791]: I0218 00:40:53.164941 4791 generic.go:334] "Generic (PLEG): container finished" podID="dc9c385f-8293-43dc-902b-89632cda2af9" 
containerID="bbf72b2c9f12dee74f3fcede599f6f8a88a35b9f83d934c686c540c4b3cccc5f" exitCode=0 Feb 18 00:40:53 crc kubenswrapper[4791]: I0218 00:40:53.165451 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rt86r" event={"ID":"dc9c385f-8293-43dc-902b-89632cda2af9","Type":"ContainerDied","Data":"bbf72b2c9f12dee74f3fcede599f6f8a88a35b9f83d934c686c540c4b3cccc5f"} Feb 18 00:40:53 crc kubenswrapper[4791]: I0218 00:40:53.174585 4791 generic.go:334] "Generic (PLEG): container finished" podID="498618d7-e10c-442d-9150-7cd04846d4d5" containerID="1152b808b5a37f6c6eed37933a2ed2dc4601b692c27e7103f151d5bf6d938675" exitCode=0 Feb 18 00:40:53 crc kubenswrapper[4791]: I0218 00:40:53.174754 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gf4b2" event={"ID":"498618d7-e10c-442d-9150-7cd04846d4d5","Type":"ContainerDied","Data":"1152b808b5a37f6c6eed37933a2ed2dc4601b692c27e7103f151d5bf6d938675"} Feb 18 00:40:54 crc kubenswrapper[4791]: I0218 00:40:54.184381 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gf4b2" event={"ID":"498618d7-e10c-442d-9150-7cd04846d4d5","Type":"ContainerStarted","Data":"2e9304d7051a54076b7759efb5000d9df2b0bad05a0e82ae6e11dbebfaa0f02b"} Feb 18 00:40:54 crc kubenswrapper[4791]: I0218 00:40:54.188896 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rt86r" event={"ID":"dc9c385f-8293-43dc-902b-89632cda2af9","Type":"ContainerStarted","Data":"fce7df76572934b9580e55e2f39be6d123f908c71b89cf1fe6c1d537b27c3670"} Feb 18 00:40:54 crc kubenswrapper[4791]: I0218 00:40:54.207901 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-gf4b2" podStartSLOduration=1.772704445 podStartE2EDuration="4.207882747s" podCreationTimestamp="2026-02-18 00:40:50 +0000 UTC" firstStartedPulling="2026-02-18 00:40:51.143402082 +0000 UTC m=+392.711415252" lastFinishedPulling="2026-02-18 00:40:53.578580384 +0000 UTC m=+395.146593554" observedRunningTime="2026-02-18 00:40:54.206700561 +0000 UTC m=+395.774713731" watchObservedRunningTime="2026-02-18 00:40:54.207882747 +0000 UTC m=+395.775895917" Feb 18 00:40:54 crc kubenswrapper[4791]: I0218 00:40:54.223461 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-rt86r" podStartSLOduration=2.671204381 podStartE2EDuration="5.223443236s" podCreationTimestamp="2026-02-18 00:40:49 +0000 UTC" firstStartedPulling="2026-02-18 00:40:51.131432783 +0000 UTC m=+392.699445963" lastFinishedPulling="2026-02-18 00:40:53.683671648 +0000 UTC m=+395.251684818" observedRunningTime="2026-02-18 00:40:54.220722652 +0000 UTC m=+395.788735822" watchObservedRunningTime="2026-02-18 00:40:54.223443236 +0000 UTC m=+395.791456406" Feb 18 00:40:56 crc kubenswrapper[4791]: I0218 00:40:56.800125 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 00:40:56 crc kubenswrapper[4791]: I0218 00:40:56.800535 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 00:40:57 crc kubenswrapper[4791]: I0218 00:40:57.784455 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-hhjg7" Feb 18 00:40:57 crc kubenswrapper[4791]: I0218 00:40:57.784743 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-hhjg7" Feb 18 00:40:57 crc kubenswrapper[4791]: I0218 00:40:57.844587 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-hhjg7" Feb 18 00:40:57 crc kubenswrapper[4791]: I0218 00:40:57.984562 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-vsm7z" Feb 18 00:40:57 crc kubenswrapper[4791]: I0218 00:40:57.984622 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-vsm7z" Feb 18 00:40:58 crc kubenswrapper[4791]: I0218 00:40:58.035971 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-vsm7z" Feb 18 00:40:58 crc kubenswrapper[4791]: I0218 00:40:58.241752 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-hhjg7" Feb 18 00:40:58 crc kubenswrapper[4791]: I0218 00:40:58.242764 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-vsm7z" Feb 18 00:41:00 crc kubenswrapper[4791]: I0218 00:41:00.193264 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-rt86r" Feb 18 00:41:00 crc kubenswrapper[4791]: I0218 00:41:00.193758 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-rt86r" Feb 18 00:41:00 crc kubenswrapper[4791]: I0218 00:41:00.233878 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-rt86r" Feb 18 00:41:00 crc kubenswrapper[4791]: I0218 00:41:00.277739 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-rt86r" Feb 18 00:41:00 crc kubenswrapper[4791]: I0218 00:41:00.396938 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-gf4b2" Feb 18 00:41:00 crc kubenswrapper[4791]: I0218 00:41:00.396990 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-gf4b2" Feb 18 00:41:00 crc kubenswrapper[4791]: I0218 00:41:00.440999 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-gf4b2" Feb 18 00:41:01 crc kubenswrapper[4791]: I0218 00:41:01.271146 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-gf4b2" Feb 18 00:41:10 crc kubenswrapper[4791]: I0218 00:41:10.029036 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" podUID="85acbbfa-62a2-49ed-8d7e-7473f1be6ab7" containerName="registry" containerID="cri-o://af0bd9293dd92cc94bae0f4f216ab7d397ce48f46b0dabbec0cb5cf4a603d709" gracePeriod=30 Feb 18 00:41:10 crc kubenswrapper[4791]: I0218 00:41:10.269342 
4791 generic.go:334] "Generic (PLEG): container finished" podID="85acbbfa-62a2-49ed-8d7e-7473f1be6ab7" containerID="af0bd9293dd92cc94bae0f4f216ab7d397ce48f46b0dabbec0cb5cf4a603d709" exitCode=0 Feb 18 00:41:10 crc kubenswrapper[4791]: I0218 00:41:10.269399 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" event={"ID":"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7","Type":"ContainerDied","Data":"af0bd9293dd92cc94bae0f4f216ab7d397ce48f46b0dabbec0cb5cf4a603d709"} Feb 18 00:41:10 crc kubenswrapper[4791]: I0218 00:41:10.434456 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:41:10 crc kubenswrapper[4791]: I0218 00:41:10.495798 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-registry-tls\") pod \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " Feb 18 00:41:10 crc kubenswrapper[4791]: I0218 00:41:10.495840 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-installation-pull-secrets\") pod \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " Feb 18 00:41:10 crc kubenswrapper[4791]: I0218 00:41:10.495910 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-registry-certificates\") pod \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " Feb 18 00:41:10 crc kubenswrapper[4791]: I0218 00:41:10.495961 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-ca-trust-extracted\") pod \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " Feb 18 00:41:10 crc kubenswrapper[4791]: I0218 00:41:10.495979 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-bound-sa-token\") pod \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " Feb 18 00:41:10 crc kubenswrapper[4791]: I0218 00:41:10.496010 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-djz5p\" (UniqueName: \"kubernetes.io/projected/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-kube-api-access-djz5p\") pod \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " Feb 18 00:41:10 crc kubenswrapper[4791]: I0218 00:41:10.496028 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-trusted-ca\") pod \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " Feb 18 00:41:10 crc kubenswrapper[4791]: I0218 00:41:10.496219 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod 
\"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\" (UID: \"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7\") " Feb 18 00:41:10 crc kubenswrapper[4791]: I0218 00:41:10.497129 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:41:10 crc kubenswrapper[4791]: I0218 00:41:10.497144 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:41:10 crc kubenswrapper[4791]: I0218 00:41:10.501787 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:41:10 crc kubenswrapper[4791]: I0218 00:41:10.501877 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:41:10 crc kubenswrapper[4791]: I0218 00:41:10.502012 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:41:10 crc kubenswrapper[4791]: I0218 00:41:10.502199 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-kube-api-access-djz5p" (OuterVolumeSpecName: "kube-api-access-djz5p") pod "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7"). InnerVolumeSpecName "kube-api-access-djz5p". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:41:10 crc kubenswrapper[4791]: I0218 00:41:10.507088 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Feb 18 00:41:10 crc kubenswrapper[4791]: I0218 00:41:10.511723 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7" (UID: "85acbbfa-62a2-49ed-8d7e-7473f1be6ab7"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:41:10 crc kubenswrapper[4791]: I0218 00:41:10.597993 4791 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-registry-certificates\") on node \"crc\" DevicePath \"\"" Feb 18 00:41:10 crc kubenswrapper[4791]: I0218 00:41:10.598203 4791 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Feb 18 00:41:10 crc kubenswrapper[4791]: I0218 00:41:10.598271 4791 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-bound-sa-token\") on node \"crc\" DevicePath \"\"" Feb 18 00:41:10 crc kubenswrapper[4791]: I0218 00:41:10.598343 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-djz5p\" (UniqueName: \"kubernetes.io/projected/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-kube-api-access-djz5p\") on node \"crc\" DevicePath \"\"" Feb 18 00:41:10 crc kubenswrapper[4791]: I0218 00:41:10.598407 4791 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-trusted-ca\") on node \"crc\" DevicePath \"\"" Feb 18 00:41:10 crc kubenswrapper[4791]: I0218 00:41:10.598467 4791 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-registry-tls\") on node \"crc\" DevicePath \"\"" Feb 18 00:41:10 crc kubenswrapper[4791]: I0218 00:41:10.598548 4791 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Feb 18 00:41:11 crc kubenswrapper[4791]: I0218 00:41:11.279829 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" event={"ID":"85acbbfa-62a2-49ed-8d7e-7473f1be6ab7","Type":"ContainerDied","Data":"9dc98f91327b9eb40a048c381fac3756355e309ce37795ba656c1676c6fdd520"} Feb 18 00:41:11 crc kubenswrapper[4791]: I0218 00:41:11.279902 4791 scope.go:117] "RemoveContainer" containerID="af0bd9293dd92cc94bae0f4f216ab7d397ce48f46b0dabbec0cb5cf4a603d709" Feb 18 00:41:11 crc kubenswrapper[4791]: I0218 00:41:11.279941 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-r7vng" Feb 18 00:41:11 crc kubenswrapper[4791]: I0218 00:41:11.300421 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-r7vng"] Feb 18 00:41:11 crc kubenswrapper[4791]: I0218 00:41:11.302918 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-r7vng"] Feb 18 00:41:13 crc kubenswrapper[4791]: I0218 00:41:13.069708 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="85acbbfa-62a2-49ed-8d7e-7473f1be6ab7" path="/var/lib/kubelet/pods/85acbbfa-62a2-49ed-8d7e-7473f1be6ab7/volumes" Feb 18 00:41:16 crc kubenswrapper[4791]: I0218 00:41:16.452548 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/cluster-monitoring-operator-6d5b84845-7827l"] Feb 18 00:41:16 crc kubenswrapper[4791]: E0218 00:41:16.453024 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85acbbfa-62a2-49ed-8d7e-7473f1be6ab7" containerName="registry" Feb 18 00:41:16 crc kubenswrapper[4791]: I0218 00:41:16.453619 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="85acbbfa-62a2-49ed-8d7e-7473f1be6ab7" containerName="registry" Feb 18 00:41:16 crc kubenswrapper[4791]: I0218 00:41:16.453914 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="85acbbfa-62a2-49ed-8d7e-7473f1be6ab7" containerName="registry" Feb 18 00:41:16 crc kubenswrapper[4791]: I0218 00:41:16.454941 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-7827l" Feb 18 00:41:16 crc kubenswrapper[4791]: I0218 00:41:16.457999 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"cluster-monitoring-operator-dockercfg-wwt9l" Feb 18 00:41:16 crc kubenswrapper[4791]: I0218 00:41:16.458306 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"openshift-service-ca.crt" Feb 18 00:41:16 crc kubenswrapper[4791]: I0218 00:41:16.458481 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"telemetry-config" Feb 18 00:41:16 crc kubenswrapper[4791]: I0218 00:41:16.459131 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/cluster-monitoring-operator-6d5b84845-7827l"] Feb 18 00:41:16 crc kubenswrapper[4791]: I0218 00:41:16.459794 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"cluster-monitoring-operator-tls" Feb 18 00:41:16 crc kubenswrapper[4791]: I0218 00:41:16.464566 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kube-root-ca.crt" Feb 18 00:41:16 crc kubenswrapper[4791]: I0218 00:41:16.625749 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-config\" (UniqueName: \"kubernetes.io/configmap/7093bbcf-8318-4506-90b3-57af442c002c-telemetry-config\") pod \"cluster-monitoring-operator-6d5b84845-7827l\" (UID: \"7093bbcf-8318-4506-90b3-57af442c002c\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-7827l" Feb 18 00:41:16 crc kubenswrapper[4791]: I0218 00:41:16.625820 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rb5p9\" (UniqueName: \"kubernetes.io/projected/7093bbcf-8318-4506-90b3-57af442c002c-kube-api-access-rb5p9\") pod 
\"cluster-monitoring-operator-6d5b84845-7827l\" (UID: \"7093bbcf-8318-4506-90b3-57af442c002c\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-7827l" Feb 18 00:41:16 crc kubenswrapper[4791]: I0218 00:41:16.625853 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cluster-monitoring-operator-tls\" (UniqueName: \"kubernetes.io/secret/7093bbcf-8318-4506-90b3-57af442c002c-cluster-monitoring-operator-tls\") pod \"cluster-monitoring-operator-6d5b84845-7827l\" (UID: \"7093bbcf-8318-4506-90b3-57af442c002c\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-7827l" Feb 18 00:41:16 crc kubenswrapper[4791]: I0218 00:41:16.726987 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-config\" (UniqueName: \"kubernetes.io/configmap/7093bbcf-8318-4506-90b3-57af442c002c-telemetry-config\") pod \"cluster-monitoring-operator-6d5b84845-7827l\" (UID: \"7093bbcf-8318-4506-90b3-57af442c002c\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-7827l" Feb 18 00:41:16 crc kubenswrapper[4791]: I0218 00:41:16.727185 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rb5p9\" (UniqueName: \"kubernetes.io/projected/7093bbcf-8318-4506-90b3-57af442c002c-kube-api-access-rb5p9\") pod \"cluster-monitoring-operator-6d5b84845-7827l\" (UID: \"7093bbcf-8318-4506-90b3-57af442c002c\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-7827l" Feb 18 00:41:16 crc kubenswrapper[4791]: I0218 00:41:16.727259 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cluster-monitoring-operator-tls\" (UniqueName: \"kubernetes.io/secret/7093bbcf-8318-4506-90b3-57af442c002c-cluster-monitoring-operator-tls\") pod \"cluster-monitoring-operator-6d5b84845-7827l\" (UID: \"7093bbcf-8318-4506-90b3-57af442c002c\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-7827l" Feb 18 00:41:16 crc kubenswrapper[4791]: I0218 00:41:16.728311 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-config\" (UniqueName: \"kubernetes.io/configmap/7093bbcf-8318-4506-90b3-57af442c002c-telemetry-config\") pod \"cluster-monitoring-operator-6d5b84845-7827l\" (UID: \"7093bbcf-8318-4506-90b3-57af442c002c\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-7827l" Feb 18 00:41:16 crc kubenswrapper[4791]: I0218 00:41:16.733874 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cluster-monitoring-operator-tls\" (UniqueName: \"kubernetes.io/secret/7093bbcf-8318-4506-90b3-57af442c002c-cluster-monitoring-operator-tls\") pod \"cluster-monitoring-operator-6d5b84845-7827l\" (UID: \"7093bbcf-8318-4506-90b3-57af442c002c\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-7827l" Feb 18 00:41:16 crc kubenswrapper[4791]: I0218 00:41:16.751207 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rb5p9\" (UniqueName: \"kubernetes.io/projected/7093bbcf-8318-4506-90b3-57af442c002c-kube-api-access-rb5p9\") pod \"cluster-monitoring-operator-6d5b84845-7827l\" (UID: \"7093bbcf-8318-4506-90b3-57af442c002c\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-7827l" Feb 18 00:41:16 crc kubenswrapper[4791]: I0218 00:41:16.774810 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-7827l" Feb 18 00:41:17 crc kubenswrapper[4791]: I0218 00:41:17.182960 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/cluster-monitoring-operator-6d5b84845-7827l"] Feb 18 00:41:17 crc kubenswrapper[4791]: I0218 00:41:17.311585 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-7827l" event={"ID":"7093bbcf-8318-4506-90b3-57af442c002c","Type":"ContainerStarted","Data":"a9296926da9237473e67fd53287fceeec473575400a3667e499bb85f75bf952f"} Feb 18 00:41:19 crc kubenswrapper[4791]: I0218 00:41:19.322511 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-7827l" event={"ID":"7093bbcf-8318-4506-90b3-57af442c002c","Type":"ContainerStarted","Data":"4fcfa94894eb01414822391f697b7c2a3ccd9d59d8697c2b84333b8e73c61cd7"} Feb 18 00:41:19 crc kubenswrapper[4791]: I0218 00:41:19.340517 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-7827l" podStartSLOduration=1.599575862 podStartE2EDuration="3.340499147s" podCreationTimestamp="2026-02-18 00:41:16 +0000 UTC" firstStartedPulling="2026-02-18 00:41:17.195971326 +0000 UTC m=+418.763984496" lastFinishedPulling="2026-02-18 00:41:18.936894611 +0000 UTC m=+420.504907781" observedRunningTime="2026-02-18 00:41:19.336247815 +0000 UTC m=+420.904261005" watchObservedRunningTime="2026-02-18 00:41:19.340499147 +0000 UTC m=+420.908512317" Feb 18 00:41:19 crc kubenswrapper[4791]: I0218 00:41:19.477592 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-m6zps"] Feb 18 00:41:19 crc kubenswrapper[4791]: I0218 00:41:19.478378 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-m6zps" Feb 18 00:41:19 crc kubenswrapper[4791]: I0218 00:41:19.482456 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-admission-webhook-tls" Feb 18 00:41:19 crc kubenswrapper[4791]: I0218 00:41:19.482577 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-admission-webhook-dockercfg-mchsr" Feb 18 00:41:19 crc kubenswrapper[4791]: I0218 00:41:19.486332 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-m6zps"] Feb 18 00:41:19 crc kubenswrapper[4791]: I0218 00:41:19.661526 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/9bf07bf4-855e-435a-a801-5bf58f76f855-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-m6zps\" (UID: \"9bf07bf4-855e-435a-a801-5bf58f76f855\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-m6zps" Feb 18 00:41:19 crc kubenswrapper[4791]: I0218 00:41:19.762663 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/9bf07bf4-855e-435a-a801-5bf58f76f855-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-m6zps\" (UID: \"9bf07bf4-855e-435a-a801-5bf58f76f855\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-m6zps" Feb 18 00:41:19 crc kubenswrapper[4791]: I0218 00:41:19.773019 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/9bf07bf4-855e-435a-a801-5bf58f76f855-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-m6zps\" (UID: \"9bf07bf4-855e-435a-a801-5bf58f76f855\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-m6zps" Feb 18 00:41:19 crc kubenswrapper[4791]: I0218 00:41:19.792885 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-m6zps" Feb 18 00:41:20 crc kubenswrapper[4791]: I0218 00:41:20.190374 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-m6zps"] Feb 18 00:41:20 crc kubenswrapper[4791]: W0218 00:41:20.195299 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9bf07bf4_855e_435a_a801_5bf58f76f855.slice/crio-f347a6183c198646883e321a350d7df684c3a224d4a754a853bad8297d993359 WatchSource:0}: Error finding container f347a6183c198646883e321a350d7df684c3a224d4a754a853bad8297d993359: Status 404 returned error can't find the container with id f347a6183c198646883e321a350d7df684c3a224d4a754a853bad8297d993359 Feb 18 00:41:20 crc kubenswrapper[4791]: I0218 00:41:20.331504 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-m6zps" event={"ID":"9bf07bf4-855e-435a-a801-5bf58f76f855","Type":"ContainerStarted","Data":"f347a6183c198646883e321a350d7df684c3a224d4a754a853bad8297d993359"} Feb 18 00:41:22 crc kubenswrapper[4791]: I0218 00:41:22.342573 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-m6zps" event={"ID":"9bf07bf4-855e-435a-a801-5bf58f76f855","Type":"ContainerStarted","Data":"dd9948ce0a3144c1ef328fee9a0e34b41937fa121ac7f71fdeaf04134106dbb3"} Feb 18 00:41:22 crc kubenswrapper[4791]: I0218 00:41:22.342975 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-m6zps" Feb 18 00:41:22 crc kubenswrapper[4791]: I0218 00:41:22.352278 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-m6zps" Feb 18 00:41:22 crc kubenswrapper[4791]: I0218 00:41:22.358734 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-m6zps" podStartSLOduration=2.167865774 podStartE2EDuration="3.358714004s" podCreationTimestamp="2026-02-18 00:41:19 +0000 UTC" firstStartedPulling="2026-02-18 00:41:20.197349295 +0000 UTC m=+421.765362485" lastFinishedPulling="2026-02-18 00:41:21.388197545 +0000 UTC m=+422.956210715" observedRunningTime="2026-02-18 00:41:22.356945349 +0000 UTC m=+423.924958529" watchObservedRunningTime="2026-02-18 00:41:22.358714004 +0000 UTC m=+423.926727194" Feb 18 00:41:22 crc kubenswrapper[4791]: I0218 00:41:22.526691 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/prometheus-operator-db54df47d-74h42"] Feb 18 00:41:22 crc kubenswrapper[4791]: I0218 00:41:22.528791 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/prometheus-operator-db54df47d-74h42" Feb 18 00:41:22 crc kubenswrapper[4791]: I0218 00:41:22.531012 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-kube-rbac-proxy-config" Feb 18 00:41:22 crc kubenswrapper[4791]: I0218 00:41:22.531020 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-tls" Feb 18 00:41:22 crc kubenswrapper[4791]: I0218 00:41:22.531060 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-dockercfg-d8gv8" Feb 18 00:41:22 crc kubenswrapper[4791]: I0218 00:41:22.531253 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"metrics-client-ca" Feb 18 00:41:22 crc kubenswrapper[4791]: I0218 00:41:22.541646 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-db54df47d-74h42"] Feb 18 00:41:22 crc kubenswrapper[4791]: I0218 00:41:22.716313 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/7c075c0e-3b5e-453f-a14b-36375ee7850f-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-74h42\" (UID: \"7c075c0e-3b5e-453f-a14b-36375ee7850f\") " pod="openshift-monitoring/prometheus-operator-db54df47d-74h42" Feb 18 00:41:22 crc kubenswrapper[4791]: I0218 00:41:22.716377 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5cb6g\" (UniqueName: \"kubernetes.io/projected/7c075c0e-3b5e-453f-a14b-36375ee7850f-kube-api-access-5cb6g\") pod \"prometheus-operator-db54df47d-74h42\" (UID: \"7c075c0e-3b5e-453f-a14b-36375ee7850f\") " pod="openshift-monitoring/prometheus-operator-db54df47d-74h42" Feb 18 00:41:22 crc kubenswrapper[4791]: I0218 00:41:22.716496 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-operator-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/7c075c0e-3b5e-453f-a14b-36375ee7850f-prometheus-operator-kube-rbac-proxy-config\") pod \"prometheus-operator-db54df47d-74h42\" (UID: \"7c075c0e-3b5e-453f-a14b-36375ee7850f\") " pod="openshift-monitoring/prometheus-operator-db54df47d-74h42" Feb 18 00:41:22 crc kubenswrapper[4791]: I0218 00:41:22.716561 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/7c075c0e-3b5e-453f-a14b-36375ee7850f-metrics-client-ca\") pod \"prometheus-operator-db54df47d-74h42\" (UID: \"7c075c0e-3b5e-453f-a14b-36375ee7850f\") " pod="openshift-monitoring/prometheus-operator-db54df47d-74h42" Feb 18 00:41:22 crc kubenswrapper[4791]: I0218 00:41:22.817555 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5cb6g\" (UniqueName: \"kubernetes.io/projected/7c075c0e-3b5e-453f-a14b-36375ee7850f-kube-api-access-5cb6g\") pod \"prometheus-operator-db54df47d-74h42\" (UID: \"7c075c0e-3b5e-453f-a14b-36375ee7850f\") " pod="openshift-monitoring/prometheus-operator-db54df47d-74h42" Feb 18 00:41:22 crc kubenswrapper[4791]: I0218 00:41:22.817656 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-operator-kube-rbac-proxy-config\" (UniqueName: 
\"kubernetes.io/secret/7c075c0e-3b5e-453f-a14b-36375ee7850f-prometheus-operator-kube-rbac-proxy-config\") pod \"prometheus-operator-db54df47d-74h42\" (UID: \"7c075c0e-3b5e-453f-a14b-36375ee7850f\") " pod="openshift-monitoring/prometheus-operator-db54df47d-74h42" Feb 18 00:41:22 crc kubenswrapper[4791]: I0218 00:41:22.817687 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/7c075c0e-3b5e-453f-a14b-36375ee7850f-metrics-client-ca\") pod \"prometheus-operator-db54df47d-74h42\" (UID: \"7c075c0e-3b5e-453f-a14b-36375ee7850f\") " pod="openshift-monitoring/prometheus-operator-db54df47d-74h42" Feb 18 00:41:22 crc kubenswrapper[4791]: I0218 00:41:22.817745 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/7c075c0e-3b5e-453f-a14b-36375ee7850f-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-74h42\" (UID: \"7c075c0e-3b5e-453f-a14b-36375ee7850f\") " pod="openshift-monitoring/prometheus-operator-db54df47d-74h42" Feb 18 00:41:22 crc kubenswrapper[4791]: I0218 00:41:22.818666 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/7c075c0e-3b5e-453f-a14b-36375ee7850f-metrics-client-ca\") pod \"prometheus-operator-db54df47d-74h42\" (UID: \"7c075c0e-3b5e-453f-a14b-36375ee7850f\") " pod="openshift-monitoring/prometheus-operator-db54df47d-74h42" Feb 18 00:41:22 crc kubenswrapper[4791]: I0218 00:41:22.824201 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-operator-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/7c075c0e-3b5e-453f-a14b-36375ee7850f-prometheus-operator-kube-rbac-proxy-config\") pod \"prometheus-operator-db54df47d-74h42\" (UID: \"7c075c0e-3b5e-453f-a14b-36375ee7850f\") " pod="openshift-monitoring/prometheus-operator-db54df47d-74h42" Feb 18 00:41:22 crc kubenswrapper[4791]: I0218 00:41:22.831426 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/7c075c0e-3b5e-453f-a14b-36375ee7850f-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-74h42\" (UID: \"7c075c0e-3b5e-453f-a14b-36375ee7850f\") " pod="openshift-monitoring/prometheus-operator-db54df47d-74h42" Feb 18 00:41:22 crc kubenswrapper[4791]: I0218 00:41:22.834122 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5cb6g\" (UniqueName: \"kubernetes.io/projected/7c075c0e-3b5e-453f-a14b-36375ee7850f-kube-api-access-5cb6g\") pod \"prometheus-operator-db54df47d-74h42\" (UID: \"7c075c0e-3b5e-453f-a14b-36375ee7850f\") " pod="openshift-monitoring/prometheus-operator-db54df47d-74h42" Feb 18 00:41:22 crc kubenswrapper[4791]: I0218 00:41:22.858663 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/prometheus-operator-db54df47d-74h42" Feb 18 00:41:23 crc kubenswrapper[4791]: I0218 00:41:23.272471 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-db54df47d-74h42"] Feb 18 00:41:23 crc kubenswrapper[4791]: W0218 00:41:23.278476 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7c075c0e_3b5e_453f_a14b_36375ee7850f.slice/crio-576b9270c62e5e4167c4aecfe3ea29e47bdc98bf8e710704d53b93add373695b WatchSource:0}: Error finding container 576b9270c62e5e4167c4aecfe3ea29e47bdc98bf8e710704d53b93add373695b: Status 404 returned error can't find the container with id 576b9270c62e5e4167c4aecfe3ea29e47bdc98bf8e710704d53b93add373695b Feb 18 00:41:23 crc kubenswrapper[4791]: I0218 00:41:23.348614 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-db54df47d-74h42" event={"ID":"7c075c0e-3b5e-453f-a14b-36375ee7850f","Type":"ContainerStarted","Data":"576b9270c62e5e4167c4aecfe3ea29e47bdc98bf8e710704d53b93add373695b"} Feb 18 00:41:25 crc kubenswrapper[4791]: I0218 00:41:25.360416 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-db54df47d-74h42" event={"ID":"7c075c0e-3b5e-453f-a14b-36375ee7850f","Type":"ContainerStarted","Data":"ea33f1a1838dd2cef7d823bea57b9d64aaf8b474d8032dfb51efc0a9fd717de5"} Feb 18 00:41:25 crc kubenswrapper[4791]: I0218 00:41:25.361674 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-db54df47d-74h42" event={"ID":"7c075c0e-3b5e-453f-a14b-36375ee7850f","Type":"ContainerStarted","Data":"904bab28ff4f3d5c7587b82a955529a92c112b69df662ecd746cceea5ef6ca97"} Feb 18 00:41:25 crc kubenswrapper[4791]: I0218 00:41:25.382938 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/prometheus-operator-db54df47d-74h42" podStartSLOduration=1.859756656 podStartE2EDuration="3.382909917s" podCreationTimestamp="2026-02-18 00:41:22 +0000 UTC" firstStartedPulling="2026-02-18 00:41:23.28009319 +0000 UTC m=+424.848106360" lastFinishedPulling="2026-02-18 00:41:24.803246451 +0000 UTC m=+426.371259621" observedRunningTime="2026-02-18 00:41:25.377094639 +0000 UTC m=+426.945107819" watchObservedRunningTime="2026-02-18 00:41:25.382909917 +0000 UTC m=+426.950923087" Feb 18 00:41:26 crc kubenswrapper[4791]: I0218 00:41:26.799746 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 00:41:26 crc kubenswrapper[4791]: I0218 00:41:26.799816 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 00:41:26 crc kubenswrapper[4791]: I0218 00:41:26.799869 4791 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" Feb 18 00:41:26 crc kubenswrapper[4791]: I0218 00:41:26.800535 4791 kuberuntime_manager.go:1027] "Message for Container of pod" 
containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6355d2e0663f057b89b7f75a0d7f3be1bf01198676dbca6743c4cddbb8aa160f"} pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 18 00:41:26 crc kubenswrapper[4791]: I0218 00:41:26.800596 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" containerID="cri-o://6355d2e0663f057b89b7f75a0d7f3be1bf01198676dbca6743c4cddbb8aa160f" gracePeriod=600 Feb 18 00:41:26 crc kubenswrapper[4791]: I0218 00:41:26.910924 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/openshift-state-metrics-566fddb674-7v2ns"] Feb 18 00:41:26 crc kubenswrapper[4791]: I0218 00:41:26.912520 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/openshift-state-metrics-566fddb674-7v2ns" Feb 18 00:41:26 crc kubenswrapper[4791]: I0218 00:41:26.917418 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-tls" Feb 18 00:41:26 crc kubenswrapper[4791]: I0218 00:41:26.917639 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-dockercfg-kmfks" Feb 18 00:41:26 crc kubenswrapper[4791]: I0218 00:41:26.917757 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-kube-rbac-proxy-config" Feb 18 00:41:26 crc kubenswrapper[4791]: I0218 00:41:26.923682 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/node-exporter-hpf4g"] Feb 18 00:41:26 crc kubenswrapper[4791]: I0218 00:41:26.925009 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/node-exporter-hpf4g" Feb 18 00:41:26 crc kubenswrapper[4791]: I0218 00:41:26.930047 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-tls" Feb 18 00:41:26 crc kubenswrapper[4791]: I0218 00:41:26.930339 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-dockercfg-q4wkp" Feb 18 00:41:26 crc kubenswrapper[4791]: I0218 00:41:26.930449 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-kube-rbac-proxy-config" Feb 18 00:41:26 crc kubenswrapper[4791]: I0218 00:41:26.933805 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/openshift-state-metrics-566fddb674-7v2ns"] Feb 18 00:41:26 crc kubenswrapper[4791]: I0218 00:41:26.953108 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/kube-state-metrics-777cb5bd5d-spmw4"] Feb 18 00:41:26 crc kubenswrapper[4791]: I0218 00:41:26.955266 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-spmw4" Feb 18 00:41:26 crc kubenswrapper[4791]: I0218 00:41:26.960355 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-kube-rbac-proxy-config" Feb 18 00:41:26 crc kubenswrapper[4791]: I0218 00:41:26.960582 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kube-state-metrics-custom-resource-state-configmap" Feb 18 00:41:26 crc kubenswrapper[4791]: I0218 00:41:26.960790 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-dockercfg-w9v4f" Feb 18 00:41:26 crc kubenswrapper[4791]: I0218 00:41:26.960980 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-tls" Feb 18 00:41:26 crc kubenswrapper[4791]: I0218 00:41:26.971309 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/kube-state-metrics-777cb5bd5d-spmw4"] Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.070780 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"volume-directive-shadow\" (UniqueName: \"kubernetes.io/empty-dir/f03780fe-061d-41ea-a73b-6638898c06eb-volume-directive-shadow\") pod \"kube-state-metrics-777cb5bd5d-spmw4\" (UID: \"f03780fe-061d-41ea-a73b-6638898c06eb\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-spmw4" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.070828 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"root\" (UniqueName: \"kubernetes.io/host-path/0dd661c0-8981-4c8d-9b8a-4c87f4ce3342-root\") pod \"node-exporter-hpf4g\" (UID: \"0dd661c0-8981-4c8d-9b8a-4c87f4ce3342\") " pod="openshift-monitoring/node-exporter-hpf4g" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.070860 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/0dd661c0-8981-4c8d-9b8a-4c87f4ce3342-sys\") pod \"node-exporter-hpf4g\" (UID: \"0dd661c0-8981-4c8d-9b8a-4c87f4ce3342\") " pod="openshift-monitoring/node-exporter-hpf4g" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.070883 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/f03780fe-061d-41ea-a73b-6638898c06eb-metrics-client-ca\") pod \"kube-state-metrics-777cb5bd5d-spmw4\" (UID: \"f03780fe-061d-41ea-a73b-6638898c06eb\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-spmw4" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.070912 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/0dd661c0-8981-4c8d-9b8a-4c87f4ce3342-metrics-client-ca\") pod \"node-exporter-hpf4g\" (UID: \"0dd661c0-8981-4c8d-9b8a-4c87f4ce3342\") " pod="openshift-monitoring/node-exporter-hpf4g" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.070927 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j6r8c\" (UniqueName: \"kubernetes.io/projected/f03780fe-061d-41ea-a73b-6638898c06eb-kube-api-access-j6r8c\") pod \"kube-state-metrics-777cb5bd5d-spmw4\" (UID: \"f03780fe-061d-41ea-a73b-6638898c06eb\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-spmw4" Feb 18 
00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.070984 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/f03780fe-061d-41ea-a73b-6638898c06eb-kube-state-metrics-kube-rbac-proxy-config\") pod \"kube-state-metrics-777cb5bd5d-spmw4\" (UID: \"f03780fe-061d-41ea-a73b-6638898c06eb\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-spmw4" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.071004 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/f03780fe-061d-41ea-a73b-6638898c06eb-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-spmw4\" (UID: \"f03780fe-061d-41ea-a73b-6638898c06eb\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-spmw4" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.071022 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-custom-resource-state-configmap\" (UniqueName: \"kubernetes.io/configmap/f03780fe-061d-41ea-a73b-6638898c06eb-kube-state-metrics-custom-resource-state-configmap\") pod \"kube-state-metrics-777cb5bd5d-spmw4\" (UID: \"f03780fe-061d-41ea-a73b-6638898c06eb\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-spmw4" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.071041 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dwvxj\" (UniqueName: \"kubernetes.io/projected/0dd661c0-8981-4c8d-9b8a-4c87f4ce3342-kube-api-access-dwvxj\") pod \"node-exporter-hpf4g\" (UID: \"0dd661c0-8981-4c8d-9b8a-4c87f4ce3342\") " pod="openshift-monitoring/node-exporter-hpf4g" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.071062 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-wtmp\" (UniqueName: \"kubernetes.io/host-path/0dd661c0-8981-4c8d-9b8a-4c87f4ce3342-node-exporter-wtmp\") pod \"node-exporter-hpf4g\" (UID: \"0dd661c0-8981-4c8d-9b8a-4c87f4ce3342\") " pod="openshift-monitoring/node-exporter-hpf4g" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.071078 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tqrdr\" (UniqueName: \"kubernetes.io/projected/61e0f5f5-accc-4b41-b699-725ac04c6b73-kube-api-access-tqrdr\") pod \"openshift-state-metrics-566fddb674-7v2ns\" (UID: \"61e0f5f5-accc-4b41-b699-725ac04c6b73\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-7v2ns" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.071124 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/61e0f5f5-accc-4b41-b699-725ac04c6b73-metrics-client-ca\") pod \"openshift-state-metrics-566fddb674-7v2ns\" (UID: \"61e0f5f5-accc-4b41-b699-725ac04c6b73\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-7v2ns" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.071170 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/0dd661c0-8981-4c8d-9b8a-4c87f4ce3342-node-exporter-kube-rbac-proxy-config\") pod \"node-exporter-hpf4g\" (UID: \"0dd661c0-8981-4c8d-9b8a-4c87f4ce3342\") " 
pod="openshift-monitoring/node-exporter-hpf4g" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.071197 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/61e0f5f5-accc-4b41-b699-725ac04c6b73-openshift-state-metrics-kube-rbac-proxy-config\") pod \"openshift-state-metrics-566fddb674-7v2ns\" (UID: \"61e0f5f5-accc-4b41-b699-725ac04c6b73\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-7v2ns" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.071212 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-tls\" (UniqueName: \"kubernetes.io/secret/0dd661c0-8981-4c8d-9b8a-4c87f4ce3342-node-exporter-tls\") pod \"node-exporter-hpf4g\" (UID: \"0dd661c0-8981-4c8d-9b8a-4c87f4ce3342\") " pod="openshift-monitoring/node-exporter-hpf4g" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.071240 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-textfile\" (UniqueName: \"kubernetes.io/empty-dir/0dd661c0-8981-4c8d-9b8a-4c87f4ce3342-node-exporter-textfile\") pod \"node-exporter-hpf4g\" (UID: \"0dd661c0-8981-4c8d-9b8a-4c87f4ce3342\") " pod="openshift-monitoring/node-exporter-hpf4g" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.071260 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/61e0f5f5-accc-4b41-b699-725ac04c6b73-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-7v2ns\" (UID: \"61e0f5f5-accc-4b41-b699-725ac04c6b73\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-7v2ns" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.172724 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/61e0f5f5-accc-4b41-b699-725ac04c6b73-metrics-client-ca\") pod \"openshift-state-metrics-566fddb674-7v2ns\" (UID: \"61e0f5f5-accc-4b41-b699-725ac04c6b73\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-7v2ns" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.172769 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/0dd661c0-8981-4c8d-9b8a-4c87f4ce3342-node-exporter-kube-rbac-proxy-config\") pod \"node-exporter-hpf4g\" (UID: \"0dd661c0-8981-4c8d-9b8a-4c87f4ce3342\") " pod="openshift-monitoring/node-exporter-hpf4g" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.172800 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/61e0f5f5-accc-4b41-b699-725ac04c6b73-openshift-state-metrics-kube-rbac-proxy-config\") pod \"openshift-state-metrics-566fddb674-7v2ns\" (UID: \"61e0f5f5-accc-4b41-b699-725ac04c6b73\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-7v2ns" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.172818 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-tls\" (UniqueName: \"kubernetes.io/secret/0dd661c0-8981-4c8d-9b8a-4c87f4ce3342-node-exporter-tls\") pod \"node-exporter-hpf4g\" (UID: \"0dd661c0-8981-4c8d-9b8a-4c87f4ce3342\") " 
pod="openshift-monitoring/node-exporter-hpf4g" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.172839 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-textfile\" (UniqueName: \"kubernetes.io/empty-dir/0dd661c0-8981-4c8d-9b8a-4c87f4ce3342-node-exporter-textfile\") pod \"node-exporter-hpf4g\" (UID: \"0dd661c0-8981-4c8d-9b8a-4c87f4ce3342\") " pod="openshift-monitoring/node-exporter-hpf4g" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.172853 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/61e0f5f5-accc-4b41-b699-725ac04c6b73-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-7v2ns\" (UID: \"61e0f5f5-accc-4b41-b699-725ac04c6b73\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-7v2ns" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.172872 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"volume-directive-shadow\" (UniqueName: \"kubernetes.io/empty-dir/f03780fe-061d-41ea-a73b-6638898c06eb-volume-directive-shadow\") pod \"kube-state-metrics-777cb5bd5d-spmw4\" (UID: \"f03780fe-061d-41ea-a73b-6638898c06eb\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-spmw4" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.172891 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"root\" (UniqueName: \"kubernetes.io/host-path/0dd661c0-8981-4c8d-9b8a-4c87f4ce3342-root\") pod \"node-exporter-hpf4g\" (UID: \"0dd661c0-8981-4c8d-9b8a-4c87f4ce3342\") " pod="openshift-monitoring/node-exporter-hpf4g" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.172912 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/0dd661c0-8981-4c8d-9b8a-4c87f4ce3342-sys\") pod \"node-exporter-hpf4g\" (UID: \"0dd661c0-8981-4c8d-9b8a-4c87f4ce3342\") " pod="openshift-monitoring/node-exporter-hpf4g" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.172929 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/f03780fe-061d-41ea-a73b-6638898c06eb-metrics-client-ca\") pod \"kube-state-metrics-777cb5bd5d-spmw4\" (UID: \"f03780fe-061d-41ea-a73b-6638898c06eb\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-spmw4" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.172952 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/0dd661c0-8981-4c8d-9b8a-4c87f4ce3342-metrics-client-ca\") pod \"node-exporter-hpf4g\" (UID: \"0dd661c0-8981-4c8d-9b8a-4c87f4ce3342\") " pod="openshift-monitoring/node-exporter-hpf4g" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.172968 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j6r8c\" (UniqueName: \"kubernetes.io/projected/f03780fe-061d-41ea-a73b-6638898c06eb-kube-api-access-j6r8c\") pod \"kube-state-metrics-777cb5bd5d-spmw4\" (UID: \"f03780fe-061d-41ea-a73b-6638898c06eb\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-spmw4" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.172991 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-kube-rbac-proxy-config\" (UniqueName: 
\"kubernetes.io/secret/f03780fe-061d-41ea-a73b-6638898c06eb-kube-state-metrics-kube-rbac-proxy-config\") pod \"kube-state-metrics-777cb5bd5d-spmw4\" (UID: \"f03780fe-061d-41ea-a73b-6638898c06eb\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-spmw4" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.173009 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/f03780fe-061d-41ea-a73b-6638898c06eb-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-spmw4\" (UID: \"f03780fe-061d-41ea-a73b-6638898c06eb\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-spmw4" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.173027 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-custom-resource-state-configmap\" (UniqueName: \"kubernetes.io/configmap/f03780fe-061d-41ea-a73b-6638898c06eb-kube-state-metrics-custom-resource-state-configmap\") pod \"kube-state-metrics-777cb5bd5d-spmw4\" (UID: \"f03780fe-061d-41ea-a73b-6638898c06eb\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-spmw4" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.173046 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dwvxj\" (UniqueName: \"kubernetes.io/projected/0dd661c0-8981-4c8d-9b8a-4c87f4ce3342-kube-api-access-dwvxj\") pod \"node-exporter-hpf4g\" (UID: \"0dd661c0-8981-4c8d-9b8a-4c87f4ce3342\") " pod="openshift-monitoring/node-exporter-hpf4g" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.173065 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-wtmp\" (UniqueName: \"kubernetes.io/host-path/0dd661c0-8981-4c8d-9b8a-4c87f4ce3342-node-exporter-wtmp\") pod \"node-exporter-hpf4g\" (UID: \"0dd661c0-8981-4c8d-9b8a-4c87f4ce3342\") " pod="openshift-monitoring/node-exporter-hpf4g" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.173081 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tqrdr\" (UniqueName: \"kubernetes.io/projected/61e0f5f5-accc-4b41-b699-725ac04c6b73-kube-api-access-tqrdr\") pod \"openshift-state-metrics-566fddb674-7v2ns\" (UID: \"61e0f5f5-accc-4b41-b699-725ac04c6b73\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-7v2ns" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.173467 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/0dd661c0-8981-4c8d-9b8a-4c87f4ce3342-sys\") pod \"node-exporter-hpf4g\" (UID: \"0dd661c0-8981-4c8d-9b8a-4c87f4ce3342\") " pod="openshift-monitoring/node-exporter-hpf4g" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.174128 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/f03780fe-061d-41ea-a73b-6638898c06eb-metrics-client-ca\") pod \"kube-state-metrics-777cb5bd5d-spmw4\" (UID: \"f03780fe-061d-41ea-a73b-6638898c06eb\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-spmw4" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.174550 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/61e0f5f5-accc-4b41-b699-725ac04c6b73-metrics-client-ca\") pod \"openshift-state-metrics-566fddb674-7v2ns\" (UID: \"61e0f5f5-accc-4b41-b699-725ac04c6b73\") " 
pod="openshift-monitoring/openshift-state-metrics-566fddb674-7v2ns" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.174973 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/0dd661c0-8981-4c8d-9b8a-4c87f4ce3342-metrics-client-ca\") pod \"node-exporter-hpf4g\" (UID: \"0dd661c0-8981-4c8d-9b8a-4c87f4ce3342\") " pod="openshift-monitoring/node-exporter-hpf4g" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.175948 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-wtmp\" (UniqueName: \"kubernetes.io/host-path/0dd661c0-8981-4c8d-9b8a-4c87f4ce3342-node-exporter-wtmp\") pod \"node-exporter-hpf4g\" (UID: \"0dd661c0-8981-4c8d-9b8a-4c87f4ce3342\") " pod="openshift-monitoring/node-exporter-hpf4g" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.175966 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"root\" (UniqueName: \"kubernetes.io/host-path/0dd661c0-8981-4c8d-9b8a-4c87f4ce3342-root\") pod \"node-exporter-hpf4g\" (UID: \"0dd661c0-8981-4c8d-9b8a-4c87f4ce3342\") " pod="openshift-monitoring/node-exporter-hpf4g" Feb 18 00:41:27 crc kubenswrapper[4791]: E0218 00:41:27.176039 4791 secret.go:188] Couldn't get secret openshift-monitoring/kube-state-metrics-tls: secret "kube-state-metrics-tls" not found Feb 18 00:41:27 crc kubenswrapper[4791]: E0218 00:41:27.176084 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f03780fe-061d-41ea-a73b-6638898c06eb-kube-state-metrics-tls podName:f03780fe-061d-41ea-a73b-6638898c06eb nodeName:}" failed. No retries permitted until 2026-02-18 00:41:27.676068301 +0000 UTC m=+429.244081471 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-state-metrics-tls" (UniqueName: "kubernetes.io/secret/f03780fe-061d-41ea-a73b-6638898c06eb-kube-state-metrics-tls") pod "kube-state-metrics-777cb5bd5d-spmw4" (UID: "f03780fe-061d-41ea-a73b-6638898c06eb") : secret "kube-state-metrics-tls" not found Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.176323 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"volume-directive-shadow\" (UniqueName: \"kubernetes.io/empty-dir/f03780fe-061d-41ea-a73b-6638898c06eb-volume-directive-shadow\") pod \"kube-state-metrics-777cb5bd5d-spmw4\" (UID: \"f03780fe-061d-41ea-a73b-6638898c06eb\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-spmw4" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.176368 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-custom-resource-state-configmap\" (UniqueName: \"kubernetes.io/configmap/f03780fe-061d-41ea-a73b-6638898c06eb-kube-state-metrics-custom-resource-state-configmap\") pod \"kube-state-metrics-777cb5bd5d-spmw4\" (UID: \"f03780fe-061d-41ea-a73b-6638898c06eb\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-spmw4" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.176675 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-textfile\" (UniqueName: \"kubernetes.io/empty-dir/0dd661c0-8981-4c8d-9b8a-4c87f4ce3342-node-exporter-textfile\") pod \"node-exporter-hpf4g\" (UID: \"0dd661c0-8981-4c8d-9b8a-4c87f4ce3342\") " pod="openshift-monitoring/node-exporter-hpf4g" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.180528 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-tls\" (UniqueName: 
\"kubernetes.io/secret/0dd661c0-8981-4c8d-9b8a-4c87f4ce3342-node-exporter-tls\") pod \"node-exporter-hpf4g\" (UID: \"0dd661c0-8981-4c8d-9b8a-4c87f4ce3342\") " pod="openshift-monitoring/node-exporter-hpf4g" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.180752 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/61e0f5f5-accc-4b41-b699-725ac04c6b73-openshift-state-metrics-kube-rbac-proxy-config\") pod \"openshift-state-metrics-566fddb674-7v2ns\" (UID: \"61e0f5f5-accc-4b41-b699-725ac04c6b73\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-7v2ns" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.181727 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/0dd661c0-8981-4c8d-9b8a-4c87f4ce3342-node-exporter-kube-rbac-proxy-config\") pod \"node-exporter-hpf4g\" (UID: \"0dd661c0-8981-4c8d-9b8a-4c87f4ce3342\") " pod="openshift-monitoring/node-exporter-hpf4g" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.181785 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/61e0f5f5-accc-4b41-b699-725ac04c6b73-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-7v2ns\" (UID: \"61e0f5f5-accc-4b41-b699-725ac04c6b73\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-7v2ns" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.192574 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tqrdr\" (UniqueName: \"kubernetes.io/projected/61e0f5f5-accc-4b41-b699-725ac04c6b73-kube-api-access-tqrdr\") pod \"openshift-state-metrics-566fddb674-7v2ns\" (UID: \"61e0f5f5-accc-4b41-b699-725ac04c6b73\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-7v2ns" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.196968 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/f03780fe-061d-41ea-a73b-6638898c06eb-kube-state-metrics-kube-rbac-proxy-config\") pod \"kube-state-metrics-777cb5bd5d-spmw4\" (UID: \"f03780fe-061d-41ea-a73b-6638898c06eb\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-spmw4" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.197675 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dwvxj\" (UniqueName: \"kubernetes.io/projected/0dd661c0-8981-4c8d-9b8a-4c87f4ce3342-kube-api-access-dwvxj\") pod \"node-exporter-hpf4g\" (UID: \"0dd661c0-8981-4c8d-9b8a-4c87f4ce3342\") " pod="openshift-monitoring/node-exporter-hpf4g" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.199246 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j6r8c\" (UniqueName: \"kubernetes.io/projected/f03780fe-061d-41ea-a73b-6638898c06eb-kube-api-access-j6r8c\") pod \"kube-state-metrics-777cb5bd5d-spmw4\" (UID: \"f03780fe-061d-41ea-a73b-6638898c06eb\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-spmw4" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.243380 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/openshift-state-metrics-566fddb674-7v2ns" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.260833 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/node-exporter-hpf4g" Feb 18 00:41:27 crc kubenswrapper[4791]: W0218 00:41:27.278654 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0dd661c0_8981_4c8d_9b8a_4c87f4ce3342.slice/crio-1db8aac73a42298da7124bf87c948e90b11480f66cee31b89dbba285401cfd1b WatchSource:0}: Error finding container 1db8aac73a42298da7124bf87c948e90b11480f66cee31b89dbba285401cfd1b: Status 404 returned error can't find the container with id 1db8aac73a42298da7124bf87c948e90b11480f66cee31b89dbba285401cfd1b Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.377286 4791 generic.go:334] "Generic (PLEG): container finished" podID="0b31a333-8f95-459c-8135-e91e557c4c85" containerID="6355d2e0663f057b89b7f75a0d7f3be1bf01198676dbca6743c4cddbb8aa160f" exitCode=0 Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.377343 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerDied","Data":"6355d2e0663f057b89b7f75a0d7f3be1bf01198676dbca6743c4cddbb8aa160f"} Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.377387 4791 scope.go:117] "RemoveContainer" containerID="cd405b51017517f7fb7b927378586637b632f5a8f6a1b4375cd166d0bda6e107" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.380018 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-hpf4g" event={"ID":"0dd661c0-8981-4c8d-9b8a-4c87f4ce3342","Type":"ContainerStarted","Data":"1db8aac73a42298da7124bf87c948e90b11480f66cee31b89dbba285401cfd1b"} Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.683458 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/f03780fe-061d-41ea-a73b-6638898c06eb-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-spmw4\" (UID: \"f03780fe-061d-41ea-a73b-6638898c06eb\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-spmw4" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.690486 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/f03780fe-061d-41ea-a73b-6638898c06eb-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-spmw4\" (UID: \"f03780fe-061d-41ea-a73b-6638898c06eb\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-spmw4" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.692403 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/openshift-state-metrics-566fddb674-7v2ns"] Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.885601 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-spmw4" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.979270 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/alertmanager-main-0"] Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.980846 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.982629 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-tls-assets-0" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.982766 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy-metric" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.984200 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy-web" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.984389 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-generated" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.984520 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-tls" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.984627 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-dockercfg-tkkhm" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.984821 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-web-config" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.989397 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy" Feb 18 00:41:27 crc kubenswrapper[4791]: I0218 00:41:27.997595 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"alertmanager-trusted-ca-bundle" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.007461 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/alertmanager-main-0"] Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.088909 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/68b6eca5-2927-47c6-b161-71d03a3cab4b-metrics-client-ca\") pod \"alertmanager-main-0\" (UID: \"68b6eca5-2927-47c6-b161-71d03a3cab4b\") " pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.089302 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/68b6eca5-2927-47c6-b161-71d03a3cab4b-tls-assets\") pod \"alertmanager-main-0\" (UID: \"68b6eca5-2927-47c6-b161-71d03a3cab4b\") " pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.089325 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/68b6eca5-2927-47c6-b161-71d03a3cab4b-secret-alertmanager-kube-rbac-proxy-web\") pod \"alertmanager-main-0\" (UID: \"68b6eca5-2927-47c6-b161-71d03a3cab4b\") " pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.089341 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-main-db\" (UniqueName: \"kubernetes.io/empty-dir/68b6eca5-2927-47c6-b161-71d03a3cab4b-alertmanager-main-db\") pod \"alertmanager-main-0\" (UID: \"68b6eca5-2927-47c6-b161-71d03a3cab4b\") " 
pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.089369 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-main-tls\" (UniqueName: \"kubernetes.io/secret/68b6eca5-2927-47c6-b161-71d03a3cab4b-secret-alertmanager-main-tls\") pod \"alertmanager-main-0\" (UID: \"68b6eca5-2927-47c6-b161-71d03a3cab4b\") " pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.089389 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-kube-rbac-proxy-metric\" (UniqueName: \"kubernetes.io/secret/68b6eca5-2927-47c6-b161-71d03a3cab4b-secret-alertmanager-kube-rbac-proxy-metric\") pod \"alertmanager-main-0\" (UID: \"68b6eca5-2927-47c6-b161-71d03a3cab4b\") " pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.089410 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/68b6eca5-2927-47c6-b161-71d03a3cab4b-secret-alertmanager-kube-rbac-proxy\") pod \"alertmanager-main-0\" (UID: \"68b6eca5-2927-47c6-b161-71d03a3cab4b\") " pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.089440 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bwcjw\" (UniqueName: \"kubernetes.io/projected/68b6eca5-2927-47c6-b161-71d03a3cab4b-kube-api-access-bwcjw\") pod \"alertmanager-main-0\" (UID: \"68b6eca5-2927-47c6-b161-71d03a3cab4b\") " pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.089461 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/68b6eca5-2927-47c6-b161-71d03a3cab4b-alertmanager-trusted-ca-bundle\") pod \"alertmanager-main-0\" (UID: \"68b6eca5-2927-47c6-b161-71d03a3cab4b\") " pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.089478 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/68b6eca5-2927-47c6-b161-71d03a3cab4b-web-config\") pod \"alertmanager-main-0\" (UID: \"68b6eca5-2927-47c6-b161-71d03a3cab4b\") " pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.089517 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/68b6eca5-2927-47c6-b161-71d03a3cab4b-config-out\") pod \"alertmanager-main-0\" (UID: \"68b6eca5-2927-47c6-b161-71d03a3cab4b\") " pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.089534 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/68b6eca5-2927-47c6-b161-71d03a3cab4b-config-volume\") pod \"alertmanager-main-0\" (UID: \"68b6eca5-2927-47c6-b161-71d03a3cab4b\") " pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.190582 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"secret-alertmanager-main-tls\" (UniqueName: \"kubernetes.io/secret/68b6eca5-2927-47c6-b161-71d03a3cab4b-secret-alertmanager-main-tls\") pod \"alertmanager-main-0\" (UID: \"68b6eca5-2927-47c6-b161-71d03a3cab4b\") " pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.190680 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-kube-rbac-proxy-metric\" (UniqueName: \"kubernetes.io/secret/68b6eca5-2927-47c6-b161-71d03a3cab4b-secret-alertmanager-kube-rbac-proxy-metric\") pod \"alertmanager-main-0\" (UID: \"68b6eca5-2927-47c6-b161-71d03a3cab4b\") " pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.190743 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/68b6eca5-2927-47c6-b161-71d03a3cab4b-secret-alertmanager-kube-rbac-proxy\") pod \"alertmanager-main-0\" (UID: \"68b6eca5-2927-47c6-b161-71d03a3cab4b\") " pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.190778 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bwcjw\" (UniqueName: \"kubernetes.io/projected/68b6eca5-2927-47c6-b161-71d03a3cab4b-kube-api-access-bwcjw\") pod \"alertmanager-main-0\" (UID: \"68b6eca5-2927-47c6-b161-71d03a3cab4b\") " pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.190846 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/68b6eca5-2927-47c6-b161-71d03a3cab4b-alertmanager-trusted-ca-bundle\") pod \"alertmanager-main-0\" (UID: \"68b6eca5-2927-47c6-b161-71d03a3cab4b\") " pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.190866 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/68b6eca5-2927-47c6-b161-71d03a3cab4b-web-config\") pod \"alertmanager-main-0\" (UID: \"68b6eca5-2927-47c6-b161-71d03a3cab4b\") " pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.190928 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/68b6eca5-2927-47c6-b161-71d03a3cab4b-config-out\") pod \"alertmanager-main-0\" (UID: \"68b6eca5-2927-47c6-b161-71d03a3cab4b\") " pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.190947 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/68b6eca5-2927-47c6-b161-71d03a3cab4b-config-volume\") pod \"alertmanager-main-0\" (UID: \"68b6eca5-2927-47c6-b161-71d03a3cab4b\") " pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.190965 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/68b6eca5-2927-47c6-b161-71d03a3cab4b-metrics-client-ca\") pod \"alertmanager-main-0\" (UID: \"68b6eca5-2927-47c6-b161-71d03a3cab4b\") " pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.190984 4791 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/68b6eca5-2927-47c6-b161-71d03a3cab4b-tls-assets\") pod \"alertmanager-main-0\" (UID: \"68b6eca5-2927-47c6-b161-71d03a3cab4b\") " pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.190998 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/68b6eca5-2927-47c6-b161-71d03a3cab4b-secret-alertmanager-kube-rbac-proxy-web\") pod \"alertmanager-main-0\" (UID: \"68b6eca5-2927-47c6-b161-71d03a3cab4b\") " pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.191015 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-main-db\" (UniqueName: \"kubernetes.io/empty-dir/68b6eca5-2927-47c6-b161-71d03a3cab4b-alertmanager-main-db\") pod \"alertmanager-main-0\" (UID: \"68b6eca5-2927-47c6-b161-71d03a3cab4b\") " pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.191439 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-main-db\" (UniqueName: \"kubernetes.io/empty-dir/68b6eca5-2927-47c6-b161-71d03a3cab4b-alertmanager-main-db\") pod \"alertmanager-main-0\" (UID: \"68b6eca5-2927-47c6-b161-71d03a3cab4b\") " pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.192181 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/68b6eca5-2927-47c6-b161-71d03a3cab4b-metrics-client-ca\") pod \"alertmanager-main-0\" (UID: \"68b6eca5-2927-47c6-b161-71d03a3cab4b\") " pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.193746 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/68b6eca5-2927-47c6-b161-71d03a3cab4b-alertmanager-trusted-ca-bundle\") pod \"alertmanager-main-0\" (UID: \"68b6eca5-2927-47c6-b161-71d03a3cab4b\") " pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.197360 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/68b6eca5-2927-47c6-b161-71d03a3cab4b-config-volume\") pod \"alertmanager-main-0\" (UID: \"68b6eca5-2927-47c6-b161-71d03a3cab4b\") " pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.197772 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/68b6eca5-2927-47c6-b161-71d03a3cab4b-secret-alertmanager-kube-rbac-proxy\") pod \"alertmanager-main-0\" (UID: \"68b6eca5-2927-47c6-b161-71d03a3cab4b\") " pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.198406 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/68b6eca5-2927-47c6-b161-71d03a3cab4b-config-out\") pod \"alertmanager-main-0\" (UID: \"68b6eca5-2927-47c6-b161-71d03a3cab4b\") " pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.199137 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"secret-alertmanager-main-tls\" (UniqueName: \"kubernetes.io/secret/68b6eca5-2927-47c6-b161-71d03a3cab4b-secret-alertmanager-main-tls\") pod \"alertmanager-main-0\" (UID: \"68b6eca5-2927-47c6-b161-71d03a3cab4b\") " pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.200661 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/68b6eca5-2927-47c6-b161-71d03a3cab4b-web-config\") pod \"alertmanager-main-0\" (UID: \"68b6eca5-2927-47c6-b161-71d03a3cab4b\") " pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.203824 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/68b6eca5-2927-47c6-b161-71d03a3cab4b-secret-alertmanager-kube-rbac-proxy-web\") pod \"alertmanager-main-0\" (UID: \"68b6eca5-2927-47c6-b161-71d03a3cab4b\") " pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.205814 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-kube-rbac-proxy-metric\" (UniqueName: \"kubernetes.io/secret/68b6eca5-2927-47c6-b161-71d03a3cab4b-secret-alertmanager-kube-rbac-proxy-metric\") pod \"alertmanager-main-0\" (UID: \"68b6eca5-2927-47c6-b161-71d03a3cab4b\") " pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.210801 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/68b6eca5-2927-47c6-b161-71d03a3cab4b-tls-assets\") pod \"alertmanager-main-0\" (UID: \"68b6eca5-2927-47c6-b161-71d03a3cab4b\") " pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.215323 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bwcjw\" (UniqueName: \"kubernetes.io/projected/68b6eca5-2927-47c6-b161-71d03a3cab4b-kube-api-access-bwcjw\") pod \"alertmanager-main-0\" (UID: \"68b6eca5-2927-47c6-b161-71d03a3cab4b\") " pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.316582 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/alertmanager-main-0" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.388952 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerStarted","Data":"c4a571b8696e216a6a4c1ca3777a5f8ef873faefd4bf24ea60bd568eedd65c7c"} Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.393722 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-7v2ns" event={"ID":"61e0f5f5-accc-4b41-b699-725ac04c6b73","Type":"ContainerStarted","Data":"8cbf4ff652740a9afa2ae12779abe19a17344d1b406d732b604dab3d45783855"} Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.393796 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-7v2ns" event={"ID":"61e0f5f5-accc-4b41-b699-725ac04c6b73","Type":"ContainerStarted","Data":"e7fea18b4e177e4eed68a5359b365f1ef3257ca25be131b279dcf03dfbd918a8"} Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.393811 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-7v2ns" event={"ID":"61e0f5f5-accc-4b41-b699-725ac04c6b73","Type":"ContainerStarted","Data":"9a1b88f0ea6621023a3ed88eb8654482db038da152b81a55ffbf1f94037ccce6"} Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.429755 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/kube-state-metrics-777cb5bd5d-spmw4"] Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.931339 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/alertmanager-main-0"] Feb 18 00:41:28 crc kubenswrapper[4791]: W0218 00:41:28.948258 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod68b6eca5_2927_47c6_b161_71d03a3cab4b.slice/crio-56c25498e7f8610f9800935f694fca016caa67b7f5ba0d1cc19e64c41bed54ce WatchSource:0}: Error finding container 56c25498e7f8610f9800935f694fca016caa67b7f5ba0d1cc19e64c41bed54ce: Status 404 returned error can't find the container with id 56c25498e7f8610f9800935f694fca016caa67b7f5ba0d1cc19e64c41bed54ce Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.979581 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/thanos-querier-655559474d-j6txv"] Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.981524 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/thanos-querier-655559474d-j6txv" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.985219 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-tls" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.987365 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-grpc-tls-814dcrlai6nvk" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.987406 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.987419 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-rules" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.987365 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-metrics" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.987582 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-web" Feb 18 00:41:28 crc kubenswrapper[4791]: I0218 00:41:28.987717 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-dockercfg-ggvqb" Feb 18 00:41:29 crc kubenswrapper[4791]: I0218 00:41:28.997588 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/thanos-querier-655559474d-j6txv"] Feb 18 00:41:29 crc kubenswrapper[4791]: I0218 00:41:29.103709 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-rules\" (UniqueName: \"kubernetes.io/secret/7f95f249-1631-4ac3-8285-e5ff5431ca59-secret-thanos-querier-kube-rbac-proxy-rules\") pod \"thanos-querier-655559474d-j6txv\" (UID: \"7f95f249-1631-4ac3-8285-e5ff5431ca59\") " pod="openshift-monitoring/thanos-querier-655559474d-j6txv" Feb 18 00:41:29 crc kubenswrapper[4791]: I0218 00:41:29.104650 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-tls\" (UniqueName: \"kubernetes.io/secret/7f95f249-1631-4ac3-8285-e5ff5431ca59-secret-thanos-querier-tls\") pod \"thanos-querier-655559474d-j6txv\" (UID: \"7f95f249-1631-4ac3-8285-e5ff5431ca59\") " pod="openshift-monitoring/thanos-querier-655559474d-j6txv" Feb 18 00:41:29 crc kubenswrapper[4791]: I0218 00:41:29.104750 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/7f95f249-1631-4ac3-8285-e5ff5431ca59-secret-thanos-querier-kube-rbac-proxy\") pod \"thanos-querier-655559474d-j6txv\" (UID: \"7f95f249-1631-4ac3-8285-e5ff5431ca59\") " pod="openshift-monitoring/thanos-querier-655559474d-j6txv" Feb 18 00:41:29 crc kubenswrapper[4791]: I0218 00:41:29.104824 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/7f95f249-1631-4ac3-8285-e5ff5431ca59-metrics-client-ca\") pod \"thanos-querier-655559474d-j6txv\" (UID: \"7f95f249-1631-4ac3-8285-e5ff5431ca59\") " pod="openshift-monitoring/thanos-querier-655559474d-j6txv" Feb 18 00:41:29 crc kubenswrapper[4791]: I0218 00:41:29.104966 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"secret-thanos-querier-kube-rbac-proxy-metrics\" (UniqueName: \"kubernetes.io/secret/7f95f249-1631-4ac3-8285-e5ff5431ca59-secret-thanos-querier-kube-rbac-proxy-metrics\") pod \"thanos-querier-655559474d-j6txv\" (UID: \"7f95f249-1631-4ac3-8285-e5ff5431ca59\") " pod="openshift-monitoring/thanos-querier-655559474d-j6txv" Feb 18 00:41:29 crc kubenswrapper[4791]: I0218 00:41:29.105075 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/7f95f249-1631-4ac3-8285-e5ff5431ca59-secret-thanos-querier-kube-rbac-proxy-web\") pod \"thanos-querier-655559474d-j6txv\" (UID: \"7f95f249-1631-4ac3-8285-e5ff5431ca59\") " pod="openshift-monitoring/thanos-querier-655559474d-j6txv" Feb 18 00:41:29 crc kubenswrapper[4791]: I0218 00:41:29.105149 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-52kvq\" (UniqueName: \"kubernetes.io/projected/7f95f249-1631-4ac3-8285-e5ff5431ca59-kube-api-access-52kvq\") pod \"thanos-querier-655559474d-j6txv\" (UID: \"7f95f249-1631-4ac3-8285-e5ff5431ca59\") " pod="openshift-monitoring/thanos-querier-655559474d-j6txv" Feb 18 00:41:29 crc kubenswrapper[4791]: I0218 00:41:29.105300 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/7f95f249-1631-4ac3-8285-e5ff5431ca59-secret-grpc-tls\") pod \"thanos-querier-655559474d-j6txv\" (UID: \"7f95f249-1631-4ac3-8285-e5ff5431ca59\") " pod="openshift-monitoring/thanos-querier-655559474d-j6txv" Feb 18 00:41:29 crc kubenswrapper[4791]: I0218 00:41:29.206691 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-rules\" (UniqueName: \"kubernetes.io/secret/7f95f249-1631-4ac3-8285-e5ff5431ca59-secret-thanos-querier-kube-rbac-proxy-rules\") pod \"thanos-querier-655559474d-j6txv\" (UID: \"7f95f249-1631-4ac3-8285-e5ff5431ca59\") " pod="openshift-monitoring/thanos-querier-655559474d-j6txv" Feb 18 00:41:29 crc kubenswrapper[4791]: I0218 00:41:29.206733 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-tls\" (UniqueName: \"kubernetes.io/secret/7f95f249-1631-4ac3-8285-e5ff5431ca59-secret-thanos-querier-tls\") pod \"thanos-querier-655559474d-j6txv\" (UID: \"7f95f249-1631-4ac3-8285-e5ff5431ca59\") " pod="openshift-monitoring/thanos-querier-655559474d-j6txv" Feb 18 00:41:29 crc kubenswrapper[4791]: I0218 00:41:29.206772 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/7f95f249-1631-4ac3-8285-e5ff5431ca59-secret-thanos-querier-kube-rbac-proxy\") pod \"thanos-querier-655559474d-j6txv\" (UID: \"7f95f249-1631-4ac3-8285-e5ff5431ca59\") " pod="openshift-monitoring/thanos-querier-655559474d-j6txv" Feb 18 00:41:29 crc kubenswrapper[4791]: I0218 00:41:29.206789 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/7f95f249-1631-4ac3-8285-e5ff5431ca59-metrics-client-ca\") pod \"thanos-querier-655559474d-j6txv\" (UID: \"7f95f249-1631-4ac3-8285-e5ff5431ca59\") " pod="openshift-monitoring/thanos-querier-655559474d-j6txv" Feb 18 00:41:29 crc kubenswrapper[4791]: I0218 00:41:29.206809 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"secret-thanos-querier-kube-rbac-proxy-metrics\" (UniqueName: \"kubernetes.io/secret/7f95f249-1631-4ac3-8285-e5ff5431ca59-secret-thanos-querier-kube-rbac-proxy-metrics\") pod \"thanos-querier-655559474d-j6txv\" (UID: \"7f95f249-1631-4ac3-8285-e5ff5431ca59\") " pod="openshift-monitoring/thanos-querier-655559474d-j6txv" Feb 18 00:41:29 crc kubenswrapper[4791]: I0218 00:41:29.206843 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/7f95f249-1631-4ac3-8285-e5ff5431ca59-secret-thanos-querier-kube-rbac-proxy-web\") pod \"thanos-querier-655559474d-j6txv\" (UID: \"7f95f249-1631-4ac3-8285-e5ff5431ca59\") " pod="openshift-monitoring/thanos-querier-655559474d-j6txv" Feb 18 00:41:29 crc kubenswrapper[4791]: I0218 00:41:29.206862 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-52kvq\" (UniqueName: \"kubernetes.io/projected/7f95f249-1631-4ac3-8285-e5ff5431ca59-kube-api-access-52kvq\") pod \"thanos-querier-655559474d-j6txv\" (UID: \"7f95f249-1631-4ac3-8285-e5ff5431ca59\") " pod="openshift-monitoring/thanos-querier-655559474d-j6txv" Feb 18 00:41:29 crc kubenswrapper[4791]: I0218 00:41:29.206890 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/7f95f249-1631-4ac3-8285-e5ff5431ca59-secret-grpc-tls\") pod \"thanos-querier-655559474d-j6txv\" (UID: \"7f95f249-1631-4ac3-8285-e5ff5431ca59\") " pod="openshift-monitoring/thanos-querier-655559474d-j6txv" Feb 18 00:41:29 crc kubenswrapper[4791]: I0218 00:41:29.209179 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/7f95f249-1631-4ac3-8285-e5ff5431ca59-metrics-client-ca\") pod \"thanos-querier-655559474d-j6txv\" (UID: \"7f95f249-1631-4ac3-8285-e5ff5431ca59\") " pod="openshift-monitoring/thanos-querier-655559474d-j6txv" Feb 18 00:41:29 crc kubenswrapper[4791]: I0218 00:41:29.214208 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/7f95f249-1631-4ac3-8285-e5ff5431ca59-secret-thanos-querier-kube-rbac-proxy\") pod \"thanos-querier-655559474d-j6txv\" (UID: \"7f95f249-1631-4ac3-8285-e5ff5431ca59\") " pod="openshift-monitoring/thanos-querier-655559474d-j6txv" Feb 18 00:41:29 crc kubenswrapper[4791]: I0218 00:41:29.216747 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/7f95f249-1631-4ac3-8285-e5ff5431ca59-secret-thanos-querier-kube-rbac-proxy-web\") pod \"thanos-querier-655559474d-j6txv\" (UID: \"7f95f249-1631-4ac3-8285-e5ff5431ca59\") " pod="openshift-monitoring/thanos-querier-655559474d-j6txv" Feb 18 00:41:29 crc kubenswrapper[4791]: I0218 00:41:29.218552 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/7f95f249-1631-4ac3-8285-e5ff5431ca59-secret-grpc-tls\") pod \"thanos-querier-655559474d-j6txv\" (UID: \"7f95f249-1631-4ac3-8285-e5ff5431ca59\") " pod="openshift-monitoring/thanos-querier-655559474d-j6txv" Feb 18 00:41:29 crc kubenswrapper[4791]: I0218 00:41:29.224024 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy-rules\" (UniqueName: 
\"kubernetes.io/secret/7f95f249-1631-4ac3-8285-e5ff5431ca59-secret-thanos-querier-kube-rbac-proxy-rules\") pod \"thanos-querier-655559474d-j6txv\" (UID: \"7f95f249-1631-4ac3-8285-e5ff5431ca59\") " pod="openshift-monitoring/thanos-querier-655559474d-j6txv" Feb 18 00:41:29 crc kubenswrapper[4791]: I0218 00:41:29.229026 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-52kvq\" (UniqueName: \"kubernetes.io/projected/7f95f249-1631-4ac3-8285-e5ff5431ca59-kube-api-access-52kvq\") pod \"thanos-querier-655559474d-j6txv\" (UID: \"7f95f249-1631-4ac3-8285-e5ff5431ca59\") " pod="openshift-monitoring/thanos-querier-655559474d-j6txv" Feb 18 00:41:29 crc kubenswrapper[4791]: I0218 00:41:29.234775 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-tls\" (UniqueName: \"kubernetes.io/secret/7f95f249-1631-4ac3-8285-e5ff5431ca59-secret-thanos-querier-tls\") pod \"thanos-querier-655559474d-j6txv\" (UID: \"7f95f249-1631-4ac3-8285-e5ff5431ca59\") " pod="openshift-monitoring/thanos-querier-655559474d-j6txv" Feb 18 00:41:29 crc kubenswrapper[4791]: I0218 00:41:29.235288 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy-metrics\" (UniqueName: \"kubernetes.io/secret/7f95f249-1631-4ac3-8285-e5ff5431ca59-secret-thanos-querier-kube-rbac-proxy-metrics\") pod \"thanos-querier-655559474d-j6txv\" (UID: \"7f95f249-1631-4ac3-8285-e5ff5431ca59\") " pod="openshift-monitoring/thanos-querier-655559474d-j6txv" Feb 18 00:41:29 crc kubenswrapper[4791]: I0218 00:41:29.305582 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/thanos-querier-655559474d-j6txv" Feb 18 00:41:29 crc kubenswrapper[4791]: I0218 00:41:29.402103 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"68b6eca5-2927-47c6-b161-71d03a3cab4b","Type":"ContainerStarted","Data":"56c25498e7f8610f9800935f694fca016caa67b7f5ba0d1cc19e64c41bed54ce"} Feb 18 00:41:29 crc kubenswrapper[4791]: I0218 00:41:29.404962 4791 generic.go:334] "Generic (PLEG): container finished" podID="0dd661c0-8981-4c8d-9b8a-4c87f4ce3342" containerID="be301c655c567dea32303bf3f30e1d24c49019edb1ffe6b333bf39f6370f9304" exitCode=0 Feb 18 00:41:29 crc kubenswrapper[4791]: I0218 00:41:29.405008 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-hpf4g" event={"ID":"0dd661c0-8981-4c8d-9b8a-4c87f4ce3342","Type":"ContainerDied","Data":"be301c655c567dea32303bf3f30e1d24c49019edb1ffe6b333bf39f6370f9304"} Feb 18 00:41:29 crc kubenswrapper[4791]: I0218 00:41:29.411200 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-spmw4" event={"ID":"f03780fe-061d-41ea-a73b-6638898c06eb","Type":"ContainerStarted","Data":"c6e1e23f5cbdc403c7827a1a7405eeebf2cda5c45bf48edd96b8da696af65b37"} Feb 18 00:41:29 crc kubenswrapper[4791]: I0218 00:41:29.830017 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/thanos-querier-655559474d-j6txv"] Feb 18 00:41:29 crc kubenswrapper[4791]: W0218 00:41:29.849316 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7f95f249_1631_4ac3_8285_e5ff5431ca59.slice/crio-afd5d517946a449d82cf484f20459a6e2b2a2557c94d0a16c0376923fd939beb WatchSource:0}: Error finding container afd5d517946a449d82cf484f20459a6e2b2a2557c94d0a16c0376923fd939beb: 
Status 404 returned error can't find the container with id afd5d517946a449d82cf484f20459a6e2b2a2557c94d0a16c0376923fd939beb Feb 18 00:41:30 crc kubenswrapper[4791]: I0218 00:41:30.420874 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-7v2ns" event={"ID":"61e0f5f5-accc-4b41-b699-725ac04c6b73","Type":"ContainerStarted","Data":"cafa576d07138e8def26e9acb0f7d20c5c5d1b5a0f0bae0ff93cd7a0847a3ff6"} Feb 18 00:41:30 crc kubenswrapper[4791]: I0218 00:41:30.422510 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-655559474d-j6txv" event={"ID":"7f95f249-1631-4ac3-8285-e5ff5431ca59","Type":"ContainerStarted","Data":"afd5d517946a449d82cf484f20459a6e2b2a2557c94d0a16c0376923fd939beb"} Feb 18 00:41:30 crc kubenswrapper[4791]: I0218 00:41:30.424968 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-hpf4g" event={"ID":"0dd661c0-8981-4c8d-9b8a-4c87f4ce3342","Type":"ContainerStarted","Data":"e2d26cd9b38ce9c10208c3bc46e60aebf2b1eee73862d55aba94541acb45c56f"} Feb 18 00:41:30 crc kubenswrapper[4791]: I0218 00:41:30.424991 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-hpf4g" event={"ID":"0dd661c0-8981-4c8d-9b8a-4c87f4ce3342","Type":"ContainerStarted","Data":"d5c0f84012ed3f59e647e866227f18067daa01abb1dfaa928547cbb487220d8d"} Feb 18 00:41:30 crc kubenswrapper[4791]: I0218 00:41:30.438967 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/openshift-state-metrics-566fddb674-7v2ns" podStartSLOduration=3.149301237 podStartE2EDuration="4.43895464s" podCreationTimestamp="2026-02-18 00:41:26 +0000 UTC" firstStartedPulling="2026-02-18 00:41:28.11414279 +0000 UTC m=+429.682155960" lastFinishedPulling="2026-02-18 00:41:29.403796183 +0000 UTC m=+430.971809363" observedRunningTime="2026-02-18 00:41:30.438379473 +0000 UTC m=+432.006392633" watchObservedRunningTime="2026-02-18 00:41:30.43895464 +0000 UTC m=+432.006967810" Feb 18 00:41:30 crc kubenswrapper[4791]: I0218 00:41:30.455754 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/node-exporter-hpf4g" podStartSLOduration=3.1837418570000002 podStartE2EDuration="4.455733337s" podCreationTimestamp="2026-02-18 00:41:26 +0000 UTC" firstStartedPulling="2026-02-18 00:41:27.281342121 +0000 UTC m=+428.849355291" lastFinishedPulling="2026-02-18 00:41:28.553333601 +0000 UTC m=+430.121346771" observedRunningTime="2026-02-18 00:41:30.452439706 +0000 UTC m=+432.020452876" watchObservedRunningTime="2026-02-18 00:41:30.455733337 +0000 UTC m=+432.023746517" Feb 18 00:41:31 crc kubenswrapper[4791]: I0218 00:41:31.432783 4791 generic.go:334] "Generic (PLEG): container finished" podID="68b6eca5-2927-47c6-b161-71d03a3cab4b" containerID="a1bbe973dbbebab74497a1af3eefa1ca015d5d194842b6d58b4568a70a62cb97" exitCode=0 Feb 18 00:41:31 crc kubenswrapper[4791]: I0218 00:41:31.432831 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"68b6eca5-2927-47c6-b161-71d03a3cab4b","Type":"ContainerDied","Data":"a1bbe973dbbebab74497a1af3eefa1ca015d5d194842b6d58b4568a70a62cb97"} Feb 18 00:41:31 crc kubenswrapper[4791]: I0218 00:41:31.434910 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-spmw4" 
event={"ID":"f03780fe-061d-41ea-a73b-6638898c06eb","Type":"ContainerStarted","Data":"5d617b65ef912005eb3c66e4759dee845c1ac375bf6d689d9a2f0f7821300362"} Feb 18 00:41:31 crc kubenswrapper[4791]: I0218 00:41:31.752485 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-58bb59cdb7-pw8s6"] Feb 18 00:41:31 crc kubenswrapper[4791]: I0218 00:41:31.753707 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-58bb59cdb7-pw8s6" Feb 18 00:41:31 crc kubenswrapper[4791]: I0218 00:41:31.765777 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-58bb59cdb7-pw8s6"] Feb 18 00:41:31 crc kubenswrapper[4791]: I0218 00:41:31.849757 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-service-ca\") pod \"console-58bb59cdb7-pw8s6\" (UID: \"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a\") " pod="openshift-console/console-58bb59cdb7-pw8s6" Feb 18 00:41:31 crc kubenswrapper[4791]: I0218 00:41:31.849806 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-console-serving-cert\") pod \"console-58bb59cdb7-pw8s6\" (UID: \"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a\") " pod="openshift-console/console-58bb59cdb7-pw8s6" Feb 18 00:41:31 crc kubenswrapper[4791]: I0218 00:41:31.849831 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-console-config\") pod \"console-58bb59cdb7-pw8s6\" (UID: \"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a\") " pod="openshift-console/console-58bb59cdb7-pw8s6" Feb 18 00:41:31 crc kubenswrapper[4791]: I0218 00:41:31.849849 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-oauth-serving-cert\") pod \"console-58bb59cdb7-pw8s6\" (UID: \"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a\") " pod="openshift-console/console-58bb59cdb7-pw8s6" Feb 18 00:41:31 crc kubenswrapper[4791]: I0218 00:41:31.850051 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zsxbq\" (UniqueName: \"kubernetes.io/projected/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-kube-api-access-zsxbq\") pod \"console-58bb59cdb7-pw8s6\" (UID: \"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a\") " pod="openshift-console/console-58bb59cdb7-pw8s6" Feb 18 00:41:31 crc kubenswrapper[4791]: I0218 00:41:31.850169 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-trusted-ca-bundle\") pod \"console-58bb59cdb7-pw8s6\" (UID: \"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a\") " pod="openshift-console/console-58bb59cdb7-pw8s6" Feb 18 00:41:31 crc kubenswrapper[4791]: I0218 00:41:31.850211 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-console-oauth-config\") pod \"console-58bb59cdb7-pw8s6\" (UID: \"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a\") " 
pod="openshift-console/console-58bb59cdb7-pw8s6" Feb 18 00:41:31 crc kubenswrapper[4791]: I0218 00:41:31.952328 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-console-oauth-config\") pod \"console-58bb59cdb7-pw8s6\" (UID: \"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a\") " pod="openshift-console/console-58bb59cdb7-pw8s6" Feb 18 00:41:31 crc kubenswrapper[4791]: I0218 00:41:31.952414 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-service-ca\") pod \"console-58bb59cdb7-pw8s6\" (UID: \"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a\") " pod="openshift-console/console-58bb59cdb7-pw8s6" Feb 18 00:41:31 crc kubenswrapper[4791]: I0218 00:41:31.952440 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-console-serving-cert\") pod \"console-58bb59cdb7-pw8s6\" (UID: \"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a\") " pod="openshift-console/console-58bb59cdb7-pw8s6" Feb 18 00:41:31 crc kubenswrapper[4791]: I0218 00:41:31.952464 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-console-config\") pod \"console-58bb59cdb7-pw8s6\" (UID: \"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a\") " pod="openshift-console/console-58bb59cdb7-pw8s6" Feb 18 00:41:31 crc kubenswrapper[4791]: I0218 00:41:31.952483 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-oauth-serving-cert\") pod \"console-58bb59cdb7-pw8s6\" (UID: \"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a\") " pod="openshift-console/console-58bb59cdb7-pw8s6" Feb 18 00:41:31 crc kubenswrapper[4791]: I0218 00:41:31.952511 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zsxbq\" (UniqueName: \"kubernetes.io/projected/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-kube-api-access-zsxbq\") pod \"console-58bb59cdb7-pw8s6\" (UID: \"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a\") " pod="openshift-console/console-58bb59cdb7-pw8s6" Feb 18 00:41:31 crc kubenswrapper[4791]: I0218 00:41:31.952542 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-trusted-ca-bundle\") pod \"console-58bb59cdb7-pw8s6\" (UID: \"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a\") " pod="openshift-console/console-58bb59cdb7-pw8s6" Feb 18 00:41:31 crc kubenswrapper[4791]: I0218 00:41:31.953440 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-console-config\") pod \"console-58bb59cdb7-pw8s6\" (UID: \"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a\") " pod="openshift-console/console-58bb59cdb7-pw8s6" Feb 18 00:41:31 crc kubenswrapper[4791]: I0218 00:41:31.953523 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-oauth-serving-cert\") pod \"console-58bb59cdb7-pw8s6\" (UID: \"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a\") " 
pod="openshift-console/console-58bb59cdb7-pw8s6" Feb 18 00:41:31 crc kubenswrapper[4791]: I0218 00:41:31.953646 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-service-ca\") pod \"console-58bb59cdb7-pw8s6\" (UID: \"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a\") " pod="openshift-console/console-58bb59cdb7-pw8s6" Feb 18 00:41:31 crc kubenswrapper[4791]: I0218 00:41:31.954342 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-trusted-ca-bundle\") pod \"console-58bb59cdb7-pw8s6\" (UID: \"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a\") " pod="openshift-console/console-58bb59cdb7-pw8s6" Feb 18 00:41:31 crc kubenswrapper[4791]: I0218 00:41:31.958304 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-console-serving-cert\") pod \"console-58bb59cdb7-pw8s6\" (UID: \"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a\") " pod="openshift-console/console-58bb59cdb7-pw8s6" Feb 18 00:41:31 crc kubenswrapper[4791]: I0218 00:41:31.971519 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zsxbq\" (UniqueName: \"kubernetes.io/projected/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-kube-api-access-zsxbq\") pod \"console-58bb59cdb7-pw8s6\" (UID: \"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a\") " pod="openshift-console/console-58bb59cdb7-pw8s6" Feb 18 00:41:31 crc kubenswrapper[4791]: I0218 00:41:31.977788 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-console-oauth-config\") pod \"console-58bb59cdb7-pw8s6\" (UID: \"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a\") " pod="openshift-console/console-58bb59cdb7-pw8s6" Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.072344 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-58bb59cdb7-pw8s6" Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.287459 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/metrics-server-86c9f7f465-dx89c"] Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.288720 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/metrics-server-86c9f7f465-dx89c" Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.292360 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/metrics-server-86c9f7f465-dx89c"] Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.292936 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-client-certs" Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.293038 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kubelet-serving-ca-bundle" Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.293136 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"metrics-server-audit-profiles" Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.293140 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-f9o8rre0j1oph" Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.293218 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-tls" Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.293306 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-dockercfg-6n9t6" Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.440709 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-spmw4" event={"ID":"f03780fe-061d-41ea-a73b-6638898c06eb","Type":"ContainerStarted","Data":"1c589bbb3649fe577aba29a5071ed08519cdc58f1fa71abe35d0cf18f5195911"} Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.440747 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-spmw4" event={"ID":"f03780fe-061d-41ea-a73b-6638898c06eb","Type":"ContainerStarted","Data":"f350521974c9e5300b964a31af531835a065ef7c211bac0704a6f8841b75100e"} Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.459191 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-metrics-server-tls\" (UniqueName: \"kubernetes.io/secret/f319c157-9cd1-4956-bbf0-90af20b44535-secret-metrics-server-tls\") pod \"metrics-server-86c9f7f465-dx89c\" (UID: \"f319c157-9cd1-4956-bbf0-90af20b44535\") " pod="openshift-monitoring/metrics-server-86c9f7f465-dx89c" Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.459249 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/f319c157-9cd1-4956-bbf0-90af20b44535-secret-metrics-client-certs\") pod \"metrics-server-86c9f7f465-dx89c\" (UID: \"f319c157-9cd1-4956-bbf0-90af20b44535\") " pod="openshift-monitoring/metrics-server-86c9f7f465-dx89c" Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.459271 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f319c157-9cd1-4956-bbf0-90af20b44535-client-ca-bundle\") pod \"metrics-server-86c9f7f465-dx89c\" (UID: \"f319c157-9cd1-4956-bbf0-90af20b44535\") " pod="openshift-monitoring/metrics-server-86c9f7f465-dx89c" Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.459569 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-server-audit-profiles\" (UniqueName: 
\"kubernetes.io/configmap/f319c157-9cd1-4956-bbf0-90af20b44535-metrics-server-audit-profiles\") pod \"metrics-server-86c9f7f465-dx89c\" (UID: \"f319c157-9cd1-4956-bbf0-90af20b44535\") " pod="openshift-monitoring/metrics-server-86c9f7f465-dx89c" Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.459786 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f319c157-9cd1-4956-bbf0-90af20b44535-configmap-kubelet-serving-ca-bundle\") pod \"metrics-server-86c9f7f465-dx89c\" (UID: \"f319c157-9cd1-4956-bbf0-90af20b44535\") " pod="openshift-monitoring/metrics-server-86c9f7f465-dx89c" Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.459840 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-log\" (UniqueName: \"kubernetes.io/empty-dir/f319c157-9cd1-4956-bbf0-90af20b44535-audit-log\") pod \"metrics-server-86c9f7f465-dx89c\" (UID: \"f319c157-9cd1-4956-bbf0-90af20b44535\") " pod="openshift-monitoring/metrics-server-86c9f7f465-dx89c" Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.459872 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hjgjx\" (UniqueName: \"kubernetes.io/projected/f319c157-9cd1-4956-bbf0-90af20b44535-kube-api-access-hjgjx\") pod \"metrics-server-86c9f7f465-dx89c\" (UID: \"f319c157-9cd1-4956-bbf0-90af20b44535\") " pod="openshift-monitoring/metrics-server-86c9f7f465-dx89c" Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.460823 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-spmw4" podStartSLOduration=4.191556725 podStartE2EDuration="6.460806924s" podCreationTimestamp="2026-02-18 00:41:26 +0000 UTC" firstStartedPulling="2026-02-18 00:41:28.51951093 +0000 UTC m=+430.087524100" lastFinishedPulling="2026-02-18 00:41:30.788761129 +0000 UTC m=+432.356774299" observedRunningTime="2026-02-18 00:41:32.455763419 +0000 UTC m=+434.023776599" watchObservedRunningTime="2026-02-18 00:41:32.460806924 +0000 UTC m=+434.028820094" Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.477736 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-58bb59cdb7-pw8s6"] Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.561315 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f319c157-9cd1-4956-bbf0-90af20b44535-configmap-kubelet-serving-ca-bundle\") pod \"metrics-server-86c9f7f465-dx89c\" (UID: \"f319c157-9cd1-4956-bbf0-90af20b44535\") " pod="openshift-monitoring/metrics-server-86c9f7f465-dx89c" Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.561736 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-log\" (UniqueName: \"kubernetes.io/empty-dir/f319c157-9cd1-4956-bbf0-90af20b44535-audit-log\") pod \"metrics-server-86c9f7f465-dx89c\" (UID: \"f319c157-9cd1-4956-bbf0-90af20b44535\") " pod="openshift-monitoring/metrics-server-86c9f7f465-dx89c" Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.562109 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hjgjx\" (UniqueName: \"kubernetes.io/projected/f319c157-9cd1-4956-bbf0-90af20b44535-kube-api-access-hjgjx\") pod \"metrics-server-86c9f7f465-dx89c\" (UID: 
\"f319c157-9cd1-4956-bbf0-90af20b44535\") " pod="openshift-monitoring/metrics-server-86c9f7f465-dx89c" Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.562208 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-metrics-server-tls\" (UniqueName: \"kubernetes.io/secret/f319c157-9cd1-4956-bbf0-90af20b44535-secret-metrics-server-tls\") pod \"metrics-server-86c9f7f465-dx89c\" (UID: \"f319c157-9cd1-4956-bbf0-90af20b44535\") " pod="openshift-monitoring/metrics-server-86c9f7f465-dx89c" Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.562261 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/f319c157-9cd1-4956-bbf0-90af20b44535-secret-metrics-client-certs\") pod \"metrics-server-86c9f7f465-dx89c\" (UID: \"f319c157-9cd1-4956-bbf0-90af20b44535\") " pod="openshift-monitoring/metrics-server-86c9f7f465-dx89c" Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.562279 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f319c157-9cd1-4956-bbf0-90af20b44535-client-ca-bundle\") pod \"metrics-server-86c9f7f465-dx89c\" (UID: \"f319c157-9cd1-4956-bbf0-90af20b44535\") " pod="openshift-monitoring/metrics-server-86c9f7f465-dx89c" Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.562310 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f319c157-9cd1-4956-bbf0-90af20b44535-configmap-kubelet-serving-ca-bundle\") pod \"metrics-server-86c9f7f465-dx89c\" (UID: \"f319c157-9cd1-4956-bbf0-90af20b44535\") " pod="openshift-monitoring/metrics-server-86c9f7f465-dx89c" Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.562493 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-log\" (UniqueName: \"kubernetes.io/empty-dir/f319c157-9cd1-4956-bbf0-90af20b44535-audit-log\") pod \"metrics-server-86c9f7f465-dx89c\" (UID: \"f319c157-9cd1-4956-bbf0-90af20b44535\") " pod="openshift-monitoring/metrics-server-86c9f7f465-dx89c" Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.562528 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-server-audit-profiles\" (UniqueName: \"kubernetes.io/configmap/f319c157-9cd1-4956-bbf0-90af20b44535-metrics-server-audit-profiles\") pod \"metrics-server-86c9f7f465-dx89c\" (UID: \"f319c157-9cd1-4956-bbf0-90af20b44535\") " pod="openshift-monitoring/metrics-server-86c9f7f465-dx89c" Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.563877 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-server-audit-profiles\" (UniqueName: \"kubernetes.io/configmap/f319c157-9cd1-4956-bbf0-90af20b44535-metrics-server-audit-profiles\") pod \"metrics-server-86c9f7f465-dx89c\" (UID: \"f319c157-9cd1-4956-bbf0-90af20b44535\") " pod="openshift-monitoring/metrics-server-86c9f7f465-dx89c" Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.567471 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/f319c157-9cd1-4956-bbf0-90af20b44535-secret-metrics-client-certs\") pod \"metrics-server-86c9f7f465-dx89c\" (UID: \"f319c157-9cd1-4956-bbf0-90af20b44535\") " pod="openshift-monitoring/metrics-server-86c9f7f465-dx89c" Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.568849 4791 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f319c157-9cd1-4956-bbf0-90af20b44535-client-ca-bundle\") pod \"metrics-server-86c9f7f465-dx89c\" (UID: \"f319c157-9cd1-4956-bbf0-90af20b44535\") " pod="openshift-monitoring/metrics-server-86c9f7f465-dx89c" Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.569894 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-metrics-server-tls\" (UniqueName: \"kubernetes.io/secret/f319c157-9cd1-4956-bbf0-90af20b44535-secret-metrics-server-tls\") pod \"metrics-server-86c9f7f465-dx89c\" (UID: \"f319c157-9cd1-4956-bbf0-90af20b44535\") " pod="openshift-monitoring/metrics-server-86c9f7f465-dx89c" Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.578565 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjgjx\" (UniqueName: \"kubernetes.io/projected/f319c157-9cd1-4956-bbf0-90af20b44535-kube-api-access-hjgjx\") pod \"metrics-server-86c9f7f465-dx89c\" (UID: \"f319c157-9cd1-4956-bbf0-90af20b44535\") " pod="openshift-monitoring/metrics-server-86c9f7f465-dx89c" Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.606536 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/metrics-server-86c9f7f465-dx89c" Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.674795 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/monitoring-plugin-8547687f6-s2jfn"] Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.675586 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/monitoring-plugin-8547687f6-s2jfn" Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.677446 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"monitoring-plugin-cert" Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.679489 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"default-dockercfg-6tstp" Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.684270 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/monitoring-plugin-8547687f6-s2jfn"] Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.765531 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"monitoring-plugin-cert\" (UniqueName: \"kubernetes.io/secret/a7749b2a-f4c5-4f5a-a487-6f659d8ef3b8-monitoring-plugin-cert\") pod \"monitoring-plugin-8547687f6-s2jfn\" (UID: \"a7749b2a-f4c5-4f5a-a487-6f659d8ef3b8\") " pod="openshift-monitoring/monitoring-plugin-8547687f6-s2jfn" Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.867244 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"monitoring-plugin-cert\" (UniqueName: \"kubernetes.io/secret/a7749b2a-f4c5-4f5a-a487-6f659d8ef3b8-monitoring-plugin-cert\") pod \"monitoring-plugin-8547687f6-s2jfn\" (UID: \"a7749b2a-f4c5-4f5a-a487-6f659d8ef3b8\") " pod="openshift-monitoring/monitoring-plugin-8547687f6-s2jfn" Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.870104 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"monitoring-plugin-cert\" (UniqueName: \"kubernetes.io/secret/a7749b2a-f4c5-4f5a-a487-6f659d8ef3b8-monitoring-plugin-cert\") pod \"monitoring-plugin-8547687f6-s2jfn\" (UID: \"a7749b2a-f4c5-4f5a-a487-6f659d8ef3b8\") " pod="openshift-monitoring/monitoring-plugin-8547687f6-s2jfn" 
Feb 18 00:41:32 crc kubenswrapper[4791]: I0218 00:41:32.995333 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/monitoring-plugin-8547687f6-s2jfn" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.198067 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/prometheus-k8s-0"] Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.199925 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.202907 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-grpc-tls-49ohk239s6mg" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.203025 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-kube-rbac-proxy-web" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.203227 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-dockercfg-rtcj7" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.203254 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-thanos-prometheus-http-client-file" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.203309 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-rbac-proxy" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.203352 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.203401 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-thanos-sidecar-tls" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.203686 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-web-config" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.205026 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"serving-certs-ca-bundle" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.205117 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-tls-assets-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.205292 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-tls" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.207667 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"prometheus-k8s-rulefiles-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.210948 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"prometheus-trusted-ca-bundle" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.224667 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-k8s-0"] Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.373266 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/80676bf8-0751-40d4-8e18-154e07369f27-configmap-kubelet-serving-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 
crc kubenswrapper[4791]: I0218 00:41:33.373308 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zbzsq\" (UniqueName: \"kubernetes.io/projected/80676bf8-0751-40d4-8e18-154e07369f27-kube-api-access-zbzsq\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.373350 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/80676bf8-0751-40d4-8e18-154e07369f27-prometheus-trusted-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.373404 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/80676bf8-0751-40d4-8e18-154e07369f27-secret-metrics-client-certs\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.373451 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/80676bf8-0751-40d4-8e18-154e07369f27-config\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.373600 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/80676bf8-0751-40d4-8e18-154e07369f27-secret-grpc-tls\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.373653 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-k8s-db\" (UniqueName: \"kubernetes.io/empty-dir/80676bf8-0751-40d4-8e18-154e07369f27-prometheus-k8s-db\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.373710 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/80676bf8-0751-40d4-8e18-154e07369f27-thanos-prometheus-http-client-file\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.373742 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/80676bf8-0751-40d4-8e18-154e07369f27-config-out\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.373769 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-serving-certs-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/80676bf8-0751-40d4-8e18-154e07369f27-configmap-serving-certs-ca-bundle\") pod 
\"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.373788 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/80676bf8-0751-40d4-8e18-154e07369f27-secret-kube-rbac-proxy\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.373844 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-prometheus-k8s-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/80676bf8-0751-40d4-8e18-154e07369f27-secret-prometheus-k8s-kube-rbac-proxy-web\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.373922 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/80676bf8-0751-40d4-8e18-154e07369f27-configmap-metrics-client-ca\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.373968 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/80676bf8-0751-40d4-8e18-154e07369f27-web-config\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.373994 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-k8s-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/80676bf8-0751-40d4-8e18-154e07369f27-prometheus-k8s-rulefiles-0\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.374021 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-prometheus-k8s-thanos-sidecar-tls\" (UniqueName: \"kubernetes.io/secret/80676bf8-0751-40d4-8e18-154e07369f27-secret-prometheus-k8s-thanos-sidecar-tls\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.374061 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-prometheus-k8s-tls\" (UniqueName: \"kubernetes.io/secret/80676bf8-0751-40d4-8e18-154e07369f27-secret-prometheus-k8s-tls\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.374109 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/80676bf8-0751-40d4-8e18-154e07369f27-tls-assets\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.475376 4791 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/80676bf8-0751-40d4-8e18-154e07369f27-web-config\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.475438 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-k8s-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/80676bf8-0751-40d4-8e18-154e07369f27-prometheus-k8s-rulefiles-0\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.475471 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-prometheus-k8s-thanos-sidecar-tls\" (UniqueName: \"kubernetes.io/secret/80676bf8-0751-40d4-8e18-154e07369f27-secret-prometheus-k8s-thanos-sidecar-tls\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.475509 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-prometheus-k8s-tls\" (UniqueName: \"kubernetes.io/secret/80676bf8-0751-40d4-8e18-154e07369f27-secret-prometheus-k8s-tls\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.475553 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/80676bf8-0751-40d4-8e18-154e07369f27-tls-assets\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.475582 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/80676bf8-0751-40d4-8e18-154e07369f27-configmap-kubelet-serving-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.475603 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zbzsq\" (UniqueName: \"kubernetes.io/projected/80676bf8-0751-40d4-8e18-154e07369f27-kube-api-access-zbzsq\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.475627 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/80676bf8-0751-40d4-8e18-154e07369f27-prometheus-trusted-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.475653 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/80676bf8-0751-40d4-8e18-154e07369f27-secret-metrics-client-certs\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.475677 4791 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/80676bf8-0751-40d4-8e18-154e07369f27-config\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.475706 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/80676bf8-0751-40d4-8e18-154e07369f27-secret-grpc-tls\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.475727 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-k8s-db\" (UniqueName: \"kubernetes.io/empty-dir/80676bf8-0751-40d4-8e18-154e07369f27-prometheus-k8s-db\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.475753 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/80676bf8-0751-40d4-8e18-154e07369f27-thanos-prometheus-http-client-file\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.475774 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/80676bf8-0751-40d4-8e18-154e07369f27-config-out\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.475798 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-serving-certs-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/80676bf8-0751-40d4-8e18-154e07369f27-configmap-serving-certs-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.475823 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/80676bf8-0751-40d4-8e18-154e07369f27-secret-kube-rbac-proxy\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.475845 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-prometheus-k8s-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/80676bf8-0751-40d4-8e18-154e07369f27-secret-prometheus-k8s-kube-rbac-proxy-web\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.475871 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/80676bf8-0751-40d4-8e18-154e07369f27-configmap-metrics-client-ca\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.476956 4791 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"configmap-metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/80676bf8-0751-40d4-8e18-154e07369f27-configmap-metrics-client-ca\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.477007 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/80676bf8-0751-40d4-8e18-154e07369f27-configmap-kubelet-serving-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.477193 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/80676bf8-0751-40d4-8e18-154e07369f27-prometheus-trusted-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.477788 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-serving-certs-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/80676bf8-0751-40d4-8e18-154e07369f27-configmap-serving-certs-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.478718 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/80676bf8-0751-40d4-8e18-154e07369f27-web-config\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.479063 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/80676bf8-0751-40d4-8e18-154e07369f27-config-out\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.479383 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-k8s-db\" (UniqueName: \"kubernetes.io/empty-dir/80676bf8-0751-40d4-8e18-154e07369f27-prometheus-k8s-db\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.480080 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/80676bf8-0751-40d4-8e18-154e07369f27-secret-metrics-client-certs\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.483254 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/80676bf8-0751-40d4-8e18-154e07369f27-secret-kube-rbac-proxy\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.483838 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: 
\"kubernetes.io/projected/80676bf8-0751-40d4-8e18-154e07369f27-tls-assets\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.484034 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/80676bf8-0751-40d4-8e18-154e07369f27-thanos-prometheus-http-client-file\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.485553 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-k8s-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/80676bf8-0751-40d4-8e18-154e07369f27-prometheus-k8s-rulefiles-0\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.491609 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/80676bf8-0751-40d4-8e18-154e07369f27-config\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.491775 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-prometheus-k8s-tls\" (UniqueName: \"kubernetes.io/secret/80676bf8-0751-40d4-8e18-154e07369f27-secret-prometheus-k8s-tls\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.494133 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/80676bf8-0751-40d4-8e18-154e07369f27-secret-grpc-tls\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.494244 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zbzsq\" (UniqueName: \"kubernetes.io/projected/80676bf8-0751-40d4-8e18-154e07369f27-kube-api-access-zbzsq\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.495608 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-prometheus-k8s-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/80676bf8-0751-40d4-8e18-154e07369f27-secret-prometheus-k8s-kube-rbac-proxy-web\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.499696 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-prometheus-k8s-thanos-sidecar-tls\" (UniqueName: \"kubernetes.io/secret/80676bf8-0751-40d4-8e18-154e07369f27-secret-prometheus-k8s-thanos-sidecar-tls\") pod \"prometheus-k8s-0\" (UID: \"80676bf8-0751-40d4-8e18-154e07369f27\") " pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:33 crc kubenswrapper[4791]: I0218 00:41:33.517519 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:34 crc kubenswrapper[4791]: W0218 00:41:34.045395 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod91431e3a_c5e7_4dd9_96b7_f7fcbfebd06a.slice/crio-4422661a270e8cc29818025e3a18c12a24c601ab9e8f9a6c948555ed4f1deb9f WatchSource:0}: Error finding container 4422661a270e8cc29818025e3a18c12a24c601ab9e8f9a6c948555ed4f1deb9f: Status 404 returned error can't find the container with id 4422661a270e8cc29818025e3a18c12a24c601ab9e8f9a6c948555ed4f1deb9f Feb 18 00:41:34 crc kubenswrapper[4791]: I0218 00:41:34.289707 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/monitoring-plugin-8547687f6-s2jfn"] Feb 18 00:41:35 crc kubenswrapper[4791]: I0218 00:41:34.450604 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/monitoring-plugin-8547687f6-s2jfn" event={"ID":"a7749b2a-f4c5-4f5a-a487-6f659d8ef3b8","Type":"ContainerStarted","Data":"929ea65e5131b30b1ebf4586122bc99d3c27068e91ab072d6e0f564a3a5da8f7"} Feb 18 00:41:35 crc kubenswrapper[4791]: I0218 00:41:34.452547 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"68b6eca5-2927-47c6-b161-71d03a3cab4b","Type":"ContainerStarted","Data":"60dd66efca325357b1dcadc5b0c1bfe966473b3e1cba3985f5385cb4022b547c"} Feb 18 00:41:35 crc kubenswrapper[4791]: I0218 00:41:34.452593 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"68b6eca5-2927-47c6-b161-71d03a3cab4b","Type":"ContainerStarted","Data":"67820d3de1ca533a4e16bed14224e76d2867b9a2b1cc1ea534aabd6a95c5fe3f"} Feb 18 00:41:35 crc kubenswrapper[4791]: I0218 00:41:34.454056 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-655559474d-j6txv" event={"ID":"7f95f249-1631-4ac3-8285-e5ff5431ca59","Type":"ContainerStarted","Data":"e320174b6015e3f07affe2951cbf0b7d63eb1c6638e0e20abbc4da9a25c02b9c"} Feb 18 00:41:35 crc kubenswrapper[4791]: I0218 00:41:34.454081 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-655559474d-j6txv" event={"ID":"7f95f249-1631-4ac3-8285-e5ff5431ca59","Type":"ContainerStarted","Data":"36dfd009ebe0648abe2df320d2b442b8d7a92aca3194932cd2e275434a853a96"} Feb 18 00:41:35 crc kubenswrapper[4791]: I0218 00:41:34.455323 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-58bb59cdb7-pw8s6" event={"ID":"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a","Type":"ContainerStarted","Data":"700527f60eadc097c92af3074024ceac77c66022764d0d3bc42aafdb0905eeb1"} Feb 18 00:41:35 crc kubenswrapper[4791]: I0218 00:41:34.455343 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-58bb59cdb7-pw8s6" event={"ID":"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a","Type":"ContainerStarted","Data":"4422661a270e8cc29818025e3a18c12a24c601ab9e8f9a6c948555ed4f1deb9f"} Feb 18 00:41:35 crc kubenswrapper[4791]: I0218 00:41:34.472087 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-58bb59cdb7-pw8s6" podStartSLOduration=3.472067302 podStartE2EDuration="3.472067302s" podCreationTimestamp="2026-02-18 00:41:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:41:34.46874041 +0000 UTC m=+436.036753590" 
watchObservedRunningTime="2026-02-18 00:41:34.472067302 +0000 UTC m=+436.040080472" Feb 18 00:41:35 crc kubenswrapper[4791]: I0218 00:41:34.544532 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/metrics-server-86c9f7f465-dx89c"] Feb 18 00:41:35 crc kubenswrapper[4791]: W0218 00:41:34.547894 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf319c157_9cd1_4956_bbf0_90af20b44535.slice/crio-dc46a896c5f4f6e539295dcc8fbcb8ce54e4c9ea3a77693fd2be9de33db1739f WatchSource:0}: Error finding container dc46a896c5f4f6e539295dcc8fbcb8ce54e4c9ea3a77693fd2be9de33db1739f: Status 404 returned error can't find the container with id dc46a896c5f4f6e539295dcc8fbcb8ce54e4c9ea3a77693fd2be9de33db1739f Feb 18 00:41:35 crc kubenswrapper[4791]: I0218 00:41:34.564703 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-k8s-0"] Feb 18 00:41:35 crc kubenswrapper[4791]: I0218 00:41:35.463219 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-655559474d-j6txv" event={"ID":"7f95f249-1631-4ac3-8285-e5ff5431ca59","Type":"ContainerStarted","Data":"d6effc6a9a0c427e6d045ee5c4fdbf761851c1106a78e32f81d0388c6db77d69"} Feb 18 00:41:35 crc kubenswrapper[4791]: I0218 00:41:35.465345 4791 generic.go:334] "Generic (PLEG): container finished" podID="80676bf8-0751-40d4-8e18-154e07369f27" containerID="d81f89042f7ae8848feb0c58bea391df0a00a13bb382f4b98131763929fccaeb" exitCode=0 Feb 18 00:41:35 crc kubenswrapper[4791]: I0218 00:41:35.465657 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"80676bf8-0751-40d4-8e18-154e07369f27","Type":"ContainerDied","Data":"d81f89042f7ae8848feb0c58bea391df0a00a13bb382f4b98131763929fccaeb"} Feb 18 00:41:35 crc kubenswrapper[4791]: I0218 00:41:35.465709 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"80676bf8-0751-40d4-8e18-154e07369f27","Type":"ContainerStarted","Data":"6d9ccc3a9575c9b2978daf10facab68d400689cd99c1c12c6804d06c7c573e95"} Feb 18 00:41:35 crc kubenswrapper[4791]: I0218 00:41:35.473496 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/metrics-server-86c9f7f465-dx89c" event={"ID":"f319c157-9cd1-4956-bbf0-90af20b44535","Type":"ContainerStarted","Data":"dc46a896c5f4f6e539295dcc8fbcb8ce54e4c9ea3a77693fd2be9de33db1739f"} Feb 18 00:41:35 crc kubenswrapper[4791]: I0218 00:41:35.479121 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"68b6eca5-2927-47c6-b161-71d03a3cab4b","Type":"ContainerStarted","Data":"6fc2649f9460b12faf6990c13c4b7ff8a648dcff791d0c539b85e1afc6b57597"} Feb 18 00:41:35 crc kubenswrapper[4791]: I0218 00:41:35.479174 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"68b6eca5-2927-47c6-b161-71d03a3cab4b","Type":"ContainerStarted","Data":"5dabaf42cf53ae24b8c247ed7e36e13e5f7383a89e32d100c8611770da937ec9"} Feb 18 00:41:35 crc kubenswrapper[4791]: I0218 00:41:35.479187 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"68b6eca5-2927-47c6-b161-71d03a3cab4b","Type":"ContainerStarted","Data":"928ced03b1d0e9ce04e6f941b0d8f6adeb8178d6ac76c343e60d7fd07c4b7c47"} Feb 18 00:41:37 crc kubenswrapper[4791]: I0218 00:41:37.489283 4791 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-monitoring/metrics-server-86c9f7f465-dx89c" event={"ID":"f319c157-9cd1-4956-bbf0-90af20b44535","Type":"ContainerStarted","Data":"fdfb6cf914214b7047e84d9dd0be7210384356f13c162d39ac73c6b187468478"} Feb 18 00:41:37 crc kubenswrapper[4791]: I0218 00:41:37.490746 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/monitoring-plugin-8547687f6-s2jfn" event={"ID":"a7749b2a-f4c5-4f5a-a487-6f659d8ef3b8","Type":"ContainerStarted","Data":"e3317adb1d26f3cf5a298b669b46ff450a7bb4523496df26e854cbc5aa1d773a"} Feb 18 00:41:37 crc kubenswrapper[4791]: I0218 00:41:37.491005 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/monitoring-plugin-8547687f6-s2jfn" Feb 18 00:41:37 crc kubenswrapper[4791]: I0218 00:41:37.496207 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"68b6eca5-2927-47c6-b161-71d03a3cab4b","Type":"ContainerStarted","Data":"ee9082656d5c7a1aec88e156840772ab179bf365e234c88451957fe4ca17a167"} Feb 18 00:41:37 crc kubenswrapper[4791]: I0218 00:41:37.496545 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/monitoring-plugin-8547687f6-s2jfn" Feb 18 00:41:37 crc kubenswrapper[4791]: I0218 00:41:37.499464 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-655559474d-j6txv" event={"ID":"7f95f249-1631-4ac3-8285-e5ff5431ca59","Type":"ContainerStarted","Data":"a96281ec53ea627b5065e8ca7173854b9dbc9d21a1f1fafaa8a8a3aca69ed1dd"} Feb 18 00:41:37 crc kubenswrapper[4791]: I0218 00:41:37.499506 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-655559474d-j6txv" event={"ID":"7f95f249-1631-4ac3-8285-e5ff5431ca59","Type":"ContainerStarted","Data":"8013aa7b42f194db8940c1e917fcda7d9b6142a44566daf8aff4ac23cd47aad9"} Feb 18 00:41:37 crc kubenswrapper[4791]: I0218 00:41:37.499521 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-655559474d-j6txv" event={"ID":"7f95f249-1631-4ac3-8285-e5ff5431ca59","Type":"ContainerStarted","Data":"618e71caac4e8116fa8bd06e039e0c2a2dc30922e7957cf7a1cd48d65685d191"} Feb 18 00:41:37 crc kubenswrapper[4791]: I0218 00:41:37.499715 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/thanos-querier-655559474d-j6txv" Feb 18 00:41:37 crc kubenswrapper[4791]: I0218 00:41:37.516295 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/metrics-server-86c9f7f465-dx89c" podStartSLOduration=3.79210656 podStartE2EDuration="5.516271239s" podCreationTimestamp="2026-02-18 00:41:32 +0000 UTC" firstStartedPulling="2026-02-18 00:41:34.550033462 +0000 UTC m=+436.118046632" lastFinishedPulling="2026-02-18 00:41:36.274198141 +0000 UTC m=+437.842211311" observedRunningTime="2026-02-18 00:41:37.504524898 +0000 UTC m=+439.072538078" watchObservedRunningTime="2026-02-18 00:41:37.516271239 +0000 UTC m=+439.084284409" Feb 18 00:41:37 crc kubenswrapper[4791]: I0218 00:41:37.532788 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/thanos-querier-655559474d-j6txv" podStartSLOduration=3.075002843 podStartE2EDuration="9.532768468s" podCreationTimestamp="2026-02-18 00:41:28 +0000 UTC" firstStartedPulling="2026-02-18 00:41:29.852310001 +0000 UTC m=+431.420323171" lastFinishedPulling="2026-02-18 00:41:36.310075636 +0000 UTC 
m=+437.878088796" observedRunningTime="2026-02-18 00:41:37.527777953 +0000 UTC m=+439.095791143" watchObservedRunningTime="2026-02-18 00:41:37.532768468 +0000 UTC m=+439.100781638" Feb 18 00:41:37 crc kubenswrapper[4791]: I0218 00:41:37.574683 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/alertmanager-main-0" podStartSLOduration=3.248162708 podStartE2EDuration="10.574660767s" podCreationTimestamp="2026-02-18 00:41:27 +0000 UTC" firstStartedPulling="2026-02-18 00:41:28.951463908 +0000 UTC m=+430.519477078" lastFinishedPulling="2026-02-18 00:41:36.277961977 +0000 UTC m=+437.845975137" observedRunningTime="2026-02-18 00:41:37.567036862 +0000 UTC m=+439.135050032" watchObservedRunningTime="2026-02-18 00:41:37.574660767 +0000 UTC m=+439.142673937" Feb 18 00:41:37 crc kubenswrapper[4791]: I0218 00:41:37.631696 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/monitoring-plugin-8547687f6-s2jfn" podStartSLOduration=3.657986571 podStartE2EDuration="5.631673132s" podCreationTimestamp="2026-02-18 00:41:32 +0000 UTC" firstStartedPulling="2026-02-18 00:41:34.300539651 +0000 UTC m=+435.868552821" lastFinishedPulling="2026-02-18 00:41:36.274226222 +0000 UTC m=+437.842239382" observedRunningTime="2026-02-18 00:41:37.625487001 +0000 UTC m=+439.193500171" watchObservedRunningTime="2026-02-18 00:41:37.631673132 +0000 UTC m=+439.199686302" Feb 18 00:41:39 crc kubenswrapper[4791]: I0218 00:41:39.325620 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/thanos-querier-655559474d-j6txv" Feb 18 00:41:39 crc kubenswrapper[4791]: I0218 00:41:39.512463 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"80676bf8-0751-40d4-8e18-154e07369f27","Type":"ContainerStarted","Data":"b685a3dcbe576d98905cdb2c3112d4988d0f0c61c810d41d27569d5e6ab8fbd9"} Feb 18 00:41:39 crc kubenswrapper[4791]: I0218 00:41:39.512525 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"80676bf8-0751-40d4-8e18-154e07369f27","Type":"ContainerStarted","Data":"c1c63cb787db1b44b8209e89f043ab7a944506811a4df8607535ecaa6cdf1ff5"} Feb 18 00:41:39 crc kubenswrapper[4791]: I0218 00:41:39.512543 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"80676bf8-0751-40d4-8e18-154e07369f27","Type":"ContainerStarted","Data":"486b274888e1c6b4705ea678fa3913b47cb55f741db2f786900c27e9eec4041c"} Feb 18 00:41:39 crc kubenswrapper[4791]: I0218 00:41:39.512559 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"80676bf8-0751-40d4-8e18-154e07369f27","Type":"ContainerStarted","Data":"fb841a4ebcfcc7a33ce276c65efafb92279ab159f5d99e21e3b4aaba1ce6ab40"} Feb 18 00:41:39 crc kubenswrapper[4791]: I0218 00:41:39.512572 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"80676bf8-0751-40d4-8e18-154e07369f27","Type":"ContainerStarted","Data":"3b810ae7d7f69550fbd6882cf06548cb9f46f62133f98ede1f10cb4960893365"} Feb 18 00:41:40 crc kubenswrapper[4791]: I0218 00:41:40.527308 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"80676bf8-0751-40d4-8e18-154e07369f27","Type":"ContainerStarted","Data":"7dbbd5cc927680186c2d1ec2fae9d63f761b61cc0c8986371f10ff670f3ca40e"} Feb 18 00:41:40 crc 
kubenswrapper[4791]: I0218 00:41:40.565123 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/prometheus-k8s-0" podStartSLOduration=4.135207327 podStartE2EDuration="7.565108509s" podCreationTimestamp="2026-02-18 00:41:33 +0000 UTC" firstStartedPulling="2026-02-18 00:41:35.473851822 +0000 UTC m=+437.041864992" lastFinishedPulling="2026-02-18 00:41:38.903753004 +0000 UTC m=+440.471766174" observedRunningTime="2026-02-18 00:41:40.563954184 +0000 UTC m=+442.131967434" watchObservedRunningTime="2026-02-18 00:41:40.565108509 +0000 UTC m=+442.133121669" Feb 18 00:41:42 crc kubenswrapper[4791]: I0218 00:41:42.073350 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-58bb59cdb7-pw8s6" Feb 18 00:41:42 crc kubenswrapper[4791]: I0218 00:41:42.073731 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-58bb59cdb7-pw8s6" Feb 18 00:41:42 crc kubenswrapper[4791]: I0218 00:41:42.078990 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-58bb59cdb7-pw8s6" Feb 18 00:41:42 crc kubenswrapper[4791]: I0218 00:41:42.544498 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-58bb59cdb7-pw8s6" Feb 18 00:41:42 crc kubenswrapper[4791]: I0218 00:41:42.607246 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-rxhdk"] Feb 18 00:41:43 crc kubenswrapper[4791]: I0218 00:41:43.518368 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:41:52 crc kubenswrapper[4791]: I0218 00:41:52.607032 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/metrics-server-86c9f7f465-dx89c" Feb 18 00:41:52 crc kubenswrapper[4791]: I0218 00:41:52.608388 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-monitoring/metrics-server-86c9f7f465-dx89c" Feb 18 00:42:07 crc kubenswrapper[4791]: I0218 00:42:07.651149 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-rxhdk" podUID="e1e9abd7-81f2-423d-8a79-c4102461680d" containerName="console" containerID="cri-o://abb2279836c94fce9cfe9c6dbe90bc93be8872699939ccd46c6299035fa85538" gracePeriod=15 Feb 18 00:42:08 crc kubenswrapper[4791]: I0218 00:42:08.032751 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-rxhdk_e1e9abd7-81f2-423d-8a79-c4102461680d/console/0.log" Feb 18 00:42:08 crc kubenswrapper[4791]: I0218 00:42:08.033297 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-rxhdk" Feb 18 00:42:08 crc kubenswrapper[4791]: I0218 00:42:08.206387 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/e1e9abd7-81f2-423d-8a79-c4102461680d-service-ca\") pod \"e1e9abd7-81f2-423d-8a79-c4102461680d\" (UID: \"e1e9abd7-81f2-423d-8a79-c4102461680d\") " Feb 18 00:42:08 crc kubenswrapper[4791]: I0218 00:42:08.206510 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/e1e9abd7-81f2-423d-8a79-c4102461680d-console-config\") pod \"e1e9abd7-81f2-423d-8a79-c4102461680d\" (UID: \"e1e9abd7-81f2-423d-8a79-c4102461680d\") " Feb 18 00:42:08 crc kubenswrapper[4791]: I0218 00:42:08.207273 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e1e9abd7-81f2-423d-8a79-c4102461680d-service-ca" (OuterVolumeSpecName: "service-ca") pod "e1e9abd7-81f2-423d-8a79-c4102461680d" (UID: "e1e9abd7-81f2-423d-8a79-c4102461680d"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:42:08 crc kubenswrapper[4791]: I0218 00:42:08.207284 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e1e9abd7-81f2-423d-8a79-c4102461680d-console-config" (OuterVolumeSpecName: "console-config") pod "e1e9abd7-81f2-423d-8a79-c4102461680d" (UID: "e1e9abd7-81f2-423d-8a79-c4102461680d"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:42:08 crc kubenswrapper[4791]: I0218 00:42:08.206543 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ksj7w\" (UniqueName: \"kubernetes.io/projected/e1e9abd7-81f2-423d-8a79-c4102461680d-kube-api-access-ksj7w\") pod \"e1e9abd7-81f2-423d-8a79-c4102461680d\" (UID: \"e1e9abd7-81f2-423d-8a79-c4102461680d\") " Feb 18 00:42:08 crc kubenswrapper[4791]: I0218 00:42:08.207370 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/e1e9abd7-81f2-423d-8a79-c4102461680d-oauth-serving-cert\") pod \"e1e9abd7-81f2-423d-8a79-c4102461680d\" (UID: \"e1e9abd7-81f2-423d-8a79-c4102461680d\") " Feb 18 00:42:08 crc kubenswrapper[4791]: I0218 00:42:08.207392 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/e1e9abd7-81f2-423d-8a79-c4102461680d-console-serving-cert\") pod \"e1e9abd7-81f2-423d-8a79-c4102461680d\" (UID: \"e1e9abd7-81f2-423d-8a79-c4102461680d\") " Feb 18 00:42:08 crc kubenswrapper[4791]: I0218 00:42:08.207789 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e1e9abd7-81f2-423d-8a79-c4102461680d-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "e1e9abd7-81f2-423d-8a79-c4102461680d" (UID: "e1e9abd7-81f2-423d-8a79-c4102461680d"). InnerVolumeSpecName "oauth-serving-cert". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:42:08 crc kubenswrapper[4791]: I0218 00:42:08.207865 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e1e9abd7-81f2-423d-8a79-c4102461680d-trusted-ca-bundle\") pod \"e1e9abd7-81f2-423d-8a79-c4102461680d\" (UID: \"e1e9abd7-81f2-423d-8a79-c4102461680d\") " Feb 18 00:42:08 crc kubenswrapper[4791]: I0218 00:42:08.208218 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e1e9abd7-81f2-423d-8a79-c4102461680d-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "e1e9abd7-81f2-423d-8a79-c4102461680d" (UID: "e1e9abd7-81f2-423d-8a79-c4102461680d"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:42:08 crc kubenswrapper[4791]: I0218 00:42:08.208289 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/e1e9abd7-81f2-423d-8a79-c4102461680d-console-oauth-config\") pod \"e1e9abd7-81f2-423d-8a79-c4102461680d\" (UID: \"e1e9abd7-81f2-423d-8a79-c4102461680d\") " Feb 18 00:42:08 crc kubenswrapper[4791]: I0218 00:42:08.208893 4791 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/e1e9abd7-81f2-423d-8a79-c4102461680d-service-ca\") on node \"crc\" DevicePath \"\"" Feb 18 00:42:08 crc kubenswrapper[4791]: I0218 00:42:08.208914 4791 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/e1e9abd7-81f2-423d-8a79-c4102461680d-console-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:42:08 crc kubenswrapper[4791]: I0218 00:42:08.208927 4791 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/e1e9abd7-81f2-423d-8a79-c4102461680d-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:42:08 crc kubenswrapper[4791]: I0218 00:42:08.208938 4791 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e1e9abd7-81f2-423d-8a79-c4102461680d-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:42:08 crc kubenswrapper[4791]: I0218 00:42:08.212897 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e1e9abd7-81f2-423d-8a79-c4102461680d-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "e1e9abd7-81f2-423d-8a79-c4102461680d" (UID: "e1e9abd7-81f2-423d-8a79-c4102461680d"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:42:08 crc kubenswrapper[4791]: I0218 00:42:08.212886 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e1e9abd7-81f2-423d-8a79-c4102461680d-kube-api-access-ksj7w" (OuterVolumeSpecName: "kube-api-access-ksj7w") pod "e1e9abd7-81f2-423d-8a79-c4102461680d" (UID: "e1e9abd7-81f2-423d-8a79-c4102461680d"). InnerVolumeSpecName "kube-api-access-ksj7w". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:42:08 crc kubenswrapper[4791]: I0218 00:42:08.213287 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e1e9abd7-81f2-423d-8a79-c4102461680d-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "e1e9abd7-81f2-423d-8a79-c4102461680d" (UID: "e1e9abd7-81f2-423d-8a79-c4102461680d"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:42:08 crc kubenswrapper[4791]: I0218 00:42:08.310457 4791 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/e1e9abd7-81f2-423d-8a79-c4102461680d-console-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:42:08 crc kubenswrapper[4791]: I0218 00:42:08.310509 4791 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/e1e9abd7-81f2-423d-8a79-c4102461680d-console-oauth-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:42:08 crc kubenswrapper[4791]: I0218 00:42:08.310522 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ksj7w\" (UniqueName: \"kubernetes.io/projected/e1e9abd7-81f2-423d-8a79-c4102461680d-kube-api-access-ksj7w\") on node \"crc\" DevicePath \"\"" Feb 18 00:42:08 crc kubenswrapper[4791]: I0218 00:42:08.743865 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-rxhdk_e1e9abd7-81f2-423d-8a79-c4102461680d/console/0.log" Feb 18 00:42:08 crc kubenswrapper[4791]: I0218 00:42:08.743910 4791 generic.go:334] "Generic (PLEG): container finished" podID="e1e9abd7-81f2-423d-8a79-c4102461680d" containerID="abb2279836c94fce9cfe9c6dbe90bc93be8872699939ccd46c6299035fa85538" exitCode=2 Feb 18 00:42:08 crc kubenswrapper[4791]: I0218 00:42:08.743939 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-rxhdk" event={"ID":"e1e9abd7-81f2-423d-8a79-c4102461680d","Type":"ContainerDied","Data":"abb2279836c94fce9cfe9c6dbe90bc93be8872699939ccd46c6299035fa85538"} Feb 18 00:42:08 crc kubenswrapper[4791]: I0218 00:42:08.743963 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-rxhdk" event={"ID":"e1e9abd7-81f2-423d-8a79-c4102461680d","Type":"ContainerDied","Data":"a8ed8ca0b7f07f0fbcced222def52c425c713c64fcde17e6c6f9360cb9d3914f"} Feb 18 00:42:08 crc kubenswrapper[4791]: I0218 00:42:08.743980 4791 scope.go:117] "RemoveContainer" containerID="abb2279836c94fce9cfe9c6dbe90bc93be8872699939ccd46c6299035fa85538" Feb 18 00:42:08 crc kubenswrapper[4791]: I0218 00:42:08.743990 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-rxhdk" Feb 18 00:42:08 crc kubenswrapper[4791]: I0218 00:42:08.763946 4791 scope.go:117] "RemoveContainer" containerID="abb2279836c94fce9cfe9c6dbe90bc93be8872699939ccd46c6299035fa85538" Feb 18 00:42:08 crc kubenswrapper[4791]: E0218 00:42:08.764494 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"abb2279836c94fce9cfe9c6dbe90bc93be8872699939ccd46c6299035fa85538\": container with ID starting with abb2279836c94fce9cfe9c6dbe90bc93be8872699939ccd46c6299035fa85538 not found: ID does not exist" containerID="abb2279836c94fce9cfe9c6dbe90bc93be8872699939ccd46c6299035fa85538" Feb 18 00:42:08 crc kubenswrapper[4791]: I0218 00:42:08.764543 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"abb2279836c94fce9cfe9c6dbe90bc93be8872699939ccd46c6299035fa85538"} err="failed to get container status \"abb2279836c94fce9cfe9c6dbe90bc93be8872699939ccd46c6299035fa85538\": rpc error: code = NotFound desc = could not find container \"abb2279836c94fce9cfe9c6dbe90bc93be8872699939ccd46c6299035fa85538\": container with ID starting with abb2279836c94fce9cfe9c6dbe90bc93be8872699939ccd46c6299035fa85538 not found: ID does not exist" Feb 18 00:42:08 crc kubenswrapper[4791]: I0218 00:42:08.771664 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-rxhdk"] Feb 18 00:42:08 crc kubenswrapper[4791]: I0218 00:42:08.778816 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-rxhdk"] Feb 18 00:42:09 crc kubenswrapper[4791]: I0218 00:42:09.082910 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e1e9abd7-81f2-423d-8a79-c4102461680d" path="/var/lib/kubelet/pods/e1e9abd7-81f2-423d-8a79-c4102461680d/volumes" Feb 18 00:42:12 crc kubenswrapper[4791]: I0218 00:42:12.613916 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-monitoring/metrics-server-86c9f7f465-dx89c" Feb 18 00:42:12 crc kubenswrapper[4791]: I0218 00:42:12.619082 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/metrics-server-86c9f7f465-dx89c" Feb 18 00:42:33 crc kubenswrapper[4791]: I0218 00:42:33.518423 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:42:33 crc kubenswrapper[4791]: I0218 00:42:33.546256 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:42:33 crc kubenswrapper[4791]: I0218 00:42:33.949518 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/prometheus-k8s-0" Feb 18 00:43:01 crc kubenswrapper[4791]: I0218 00:43:01.183999 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f49d7b5fb-z9n6g"] Feb 18 00:43:01 crc kubenswrapper[4791]: E0218 00:43:01.185052 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1e9abd7-81f2-423d-8a79-c4102461680d" containerName="console" Feb 18 00:43:01 crc kubenswrapper[4791]: I0218 00:43:01.185071 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1e9abd7-81f2-423d-8a79-c4102461680d" containerName="console" Feb 18 00:43:01 crc kubenswrapper[4791]: I0218 00:43:01.185230 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="e1e9abd7-81f2-423d-8a79-c4102461680d" 
containerName="console" Feb 18 00:43:01 crc kubenswrapper[4791]: I0218 00:43:01.185753 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f49d7b5fb-z9n6g" Feb 18 00:43:01 crc kubenswrapper[4791]: I0218 00:43:01.203781 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f49d7b5fb-z9n6g"] Feb 18 00:43:01 crc kubenswrapper[4791]: I0218 00:43:01.327595 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4c121c1e-022d-48bd-9be0-e7c01c754103-console-oauth-config\") pod \"console-f49d7b5fb-z9n6g\" (UID: \"4c121c1e-022d-48bd-9be0-e7c01c754103\") " pod="openshift-console/console-f49d7b5fb-z9n6g" Feb 18 00:43:01 crc kubenswrapper[4791]: I0218 00:43:01.327649 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4c121c1e-022d-48bd-9be0-e7c01c754103-trusted-ca-bundle\") pod \"console-f49d7b5fb-z9n6g\" (UID: \"4c121c1e-022d-48bd-9be0-e7c01c754103\") " pod="openshift-console/console-f49d7b5fb-z9n6g" Feb 18 00:43:01 crc kubenswrapper[4791]: I0218 00:43:01.327673 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ld7h8\" (UniqueName: \"kubernetes.io/projected/4c121c1e-022d-48bd-9be0-e7c01c754103-kube-api-access-ld7h8\") pod \"console-f49d7b5fb-z9n6g\" (UID: \"4c121c1e-022d-48bd-9be0-e7c01c754103\") " pod="openshift-console/console-f49d7b5fb-z9n6g" Feb 18 00:43:01 crc kubenswrapper[4791]: I0218 00:43:01.327693 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4c121c1e-022d-48bd-9be0-e7c01c754103-console-config\") pod \"console-f49d7b5fb-z9n6g\" (UID: \"4c121c1e-022d-48bd-9be0-e7c01c754103\") " pod="openshift-console/console-f49d7b5fb-z9n6g" Feb 18 00:43:01 crc kubenswrapper[4791]: I0218 00:43:01.327853 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4c121c1e-022d-48bd-9be0-e7c01c754103-service-ca\") pod \"console-f49d7b5fb-z9n6g\" (UID: \"4c121c1e-022d-48bd-9be0-e7c01c754103\") " pod="openshift-console/console-f49d7b5fb-z9n6g" Feb 18 00:43:01 crc kubenswrapper[4791]: I0218 00:43:01.327901 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4c121c1e-022d-48bd-9be0-e7c01c754103-console-serving-cert\") pod \"console-f49d7b5fb-z9n6g\" (UID: \"4c121c1e-022d-48bd-9be0-e7c01c754103\") " pod="openshift-console/console-f49d7b5fb-z9n6g" Feb 18 00:43:01 crc kubenswrapper[4791]: I0218 00:43:01.327950 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4c121c1e-022d-48bd-9be0-e7c01c754103-oauth-serving-cert\") pod \"console-f49d7b5fb-z9n6g\" (UID: \"4c121c1e-022d-48bd-9be0-e7c01c754103\") " pod="openshift-console/console-f49d7b5fb-z9n6g" Feb 18 00:43:01 crc kubenswrapper[4791]: I0218 00:43:01.429213 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4c121c1e-022d-48bd-9be0-e7c01c754103-service-ca\") pod \"console-f49d7b5fb-z9n6g\" (UID: 
\"4c121c1e-022d-48bd-9be0-e7c01c754103\") " pod="openshift-console/console-f49d7b5fb-z9n6g" Feb 18 00:43:01 crc kubenswrapper[4791]: I0218 00:43:01.429268 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4c121c1e-022d-48bd-9be0-e7c01c754103-console-serving-cert\") pod \"console-f49d7b5fb-z9n6g\" (UID: \"4c121c1e-022d-48bd-9be0-e7c01c754103\") " pod="openshift-console/console-f49d7b5fb-z9n6g" Feb 18 00:43:01 crc kubenswrapper[4791]: I0218 00:43:01.429302 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4c121c1e-022d-48bd-9be0-e7c01c754103-oauth-serving-cert\") pod \"console-f49d7b5fb-z9n6g\" (UID: \"4c121c1e-022d-48bd-9be0-e7c01c754103\") " pod="openshift-console/console-f49d7b5fb-z9n6g" Feb 18 00:43:01 crc kubenswrapper[4791]: I0218 00:43:01.429359 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4c121c1e-022d-48bd-9be0-e7c01c754103-console-oauth-config\") pod \"console-f49d7b5fb-z9n6g\" (UID: \"4c121c1e-022d-48bd-9be0-e7c01c754103\") " pod="openshift-console/console-f49d7b5fb-z9n6g" Feb 18 00:43:01 crc kubenswrapper[4791]: I0218 00:43:01.429412 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4c121c1e-022d-48bd-9be0-e7c01c754103-trusted-ca-bundle\") pod \"console-f49d7b5fb-z9n6g\" (UID: \"4c121c1e-022d-48bd-9be0-e7c01c754103\") " pod="openshift-console/console-f49d7b5fb-z9n6g" Feb 18 00:43:01 crc kubenswrapper[4791]: I0218 00:43:01.429442 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ld7h8\" (UniqueName: \"kubernetes.io/projected/4c121c1e-022d-48bd-9be0-e7c01c754103-kube-api-access-ld7h8\") pod \"console-f49d7b5fb-z9n6g\" (UID: \"4c121c1e-022d-48bd-9be0-e7c01c754103\") " pod="openshift-console/console-f49d7b5fb-z9n6g" Feb 18 00:43:01 crc kubenswrapper[4791]: I0218 00:43:01.429475 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4c121c1e-022d-48bd-9be0-e7c01c754103-console-config\") pod \"console-f49d7b5fb-z9n6g\" (UID: \"4c121c1e-022d-48bd-9be0-e7c01c754103\") " pod="openshift-console/console-f49d7b5fb-z9n6g" Feb 18 00:43:01 crc kubenswrapper[4791]: I0218 00:43:01.430306 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4c121c1e-022d-48bd-9be0-e7c01c754103-service-ca\") pod \"console-f49d7b5fb-z9n6g\" (UID: \"4c121c1e-022d-48bd-9be0-e7c01c754103\") " pod="openshift-console/console-f49d7b5fb-z9n6g" Feb 18 00:43:01 crc kubenswrapper[4791]: I0218 00:43:01.430387 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4c121c1e-022d-48bd-9be0-e7c01c754103-oauth-serving-cert\") pod \"console-f49d7b5fb-z9n6g\" (UID: \"4c121c1e-022d-48bd-9be0-e7c01c754103\") " pod="openshift-console/console-f49d7b5fb-z9n6g" Feb 18 00:43:01 crc kubenswrapper[4791]: I0218 00:43:01.430654 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4c121c1e-022d-48bd-9be0-e7c01c754103-console-config\") pod \"console-f49d7b5fb-z9n6g\" (UID: \"4c121c1e-022d-48bd-9be0-e7c01c754103\") " 
pod="openshift-console/console-f49d7b5fb-z9n6g" Feb 18 00:43:01 crc kubenswrapper[4791]: I0218 00:43:01.430795 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4c121c1e-022d-48bd-9be0-e7c01c754103-trusted-ca-bundle\") pod \"console-f49d7b5fb-z9n6g\" (UID: \"4c121c1e-022d-48bd-9be0-e7c01c754103\") " pod="openshift-console/console-f49d7b5fb-z9n6g" Feb 18 00:43:01 crc kubenswrapper[4791]: I0218 00:43:01.434631 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4c121c1e-022d-48bd-9be0-e7c01c754103-console-oauth-config\") pod \"console-f49d7b5fb-z9n6g\" (UID: \"4c121c1e-022d-48bd-9be0-e7c01c754103\") " pod="openshift-console/console-f49d7b5fb-z9n6g" Feb 18 00:43:01 crc kubenswrapper[4791]: I0218 00:43:01.436103 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4c121c1e-022d-48bd-9be0-e7c01c754103-console-serving-cert\") pod \"console-f49d7b5fb-z9n6g\" (UID: \"4c121c1e-022d-48bd-9be0-e7c01c754103\") " pod="openshift-console/console-f49d7b5fb-z9n6g" Feb 18 00:43:01 crc kubenswrapper[4791]: I0218 00:43:01.445267 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ld7h8\" (UniqueName: \"kubernetes.io/projected/4c121c1e-022d-48bd-9be0-e7c01c754103-kube-api-access-ld7h8\") pod \"console-f49d7b5fb-z9n6g\" (UID: \"4c121c1e-022d-48bd-9be0-e7c01c754103\") " pod="openshift-console/console-f49d7b5fb-z9n6g" Feb 18 00:43:01 crc kubenswrapper[4791]: I0218 00:43:01.510643 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f49d7b5fb-z9n6g" Feb 18 00:43:01 crc kubenswrapper[4791]: I0218 00:43:01.717970 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f49d7b5fb-z9n6g"] Feb 18 00:43:02 crc kubenswrapper[4791]: I0218 00:43:02.117538 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f49d7b5fb-z9n6g" event={"ID":"4c121c1e-022d-48bd-9be0-e7c01c754103","Type":"ContainerStarted","Data":"7873414ea46b713a0829501d65713404546fc17722156a16354399ab10c40cfe"} Feb 18 00:43:02 crc kubenswrapper[4791]: I0218 00:43:02.118023 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f49d7b5fb-z9n6g" event={"ID":"4c121c1e-022d-48bd-9be0-e7c01c754103","Type":"ContainerStarted","Data":"d403c292f512e38c38f0fdceb7d6afaf3b42a8afffeedeb360a809eb1516edf9"} Feb 18 00:43:02 crc kubenswrapper[4791]: I0218 00:43:02.139806 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f49d7b5fb-z9n6g" podStartSLOduration=1.13978102 podStartE2EDuration="1.13978102s" podCreationTimestamp="2026-02-18 00:43:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:43:02.135778199 +0000 UTC m=+523.703791379" watchObservedRunningTime="2026-02-18 00:43:02.13978102 +0000 UTC m=+523.707794190" Feb 18 00:43:11 crc kubenswrapper[4791]: I0218 00:43:11.511284 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f49d7b5fb-z9n6g" Feb 18 00:43:11 crc kubenswrapper[4791]: I0218 00:43:11.511838 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f49d7b5fb-z9n6g" Feb 18 00:43:11 crc 
kubenswrapper[4791]: I0218 00:43:11.517146 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f49d7b5fb-z9n6g" Feb 18 00:43:12 crc kubenswrapper[4791]: I0218 00:43:12.193573 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f49d7b5fb-z9n6g" Feb 18 00:43:12 crc kubenswrapper[4791]: I0218 00:43:12.265659 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-58bb59cdb7-pw8s6"] Feb 18 00:43:37 crc kubenswrapper[4791]: I0218 00:43:37.320130 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-58bb59cdb7-pw8s6" podUID="91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a" containerName="console" containerID="cri-o://700527f60eadc097c92af3074024ceac77c66022764d0d3bc42aafdb0905eeb1" gracePeriod=15 Feb 18 00:43:37 crc kubenswrapper[4791]: I0218 00:43:37.749312 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-58bb59cdb7-pw8s6_91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a/console/0.log" Feb 18 00:43:37 crc kubenswrapper[4791]: I0218 00:43:37.749399 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-58bb59cdb7-pw8s6" Feb 18 00:43:37 crc kubenswrapper[4791]: I0218 00:43:37.775958 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zsxbq\" (UniqueName: \"kubernetes.io/projected/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-kube-api-access-zsxbq\") pod \"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a\" (UID: \"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a\") " Feb 18 00:43:37 crc kubenswrapper[4791]: I0218 00:43:37.776021 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-trusted-ca-bundle\") pod \"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a\" (UID: \"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a\") " Feb 18 00:43:37 crc kubenswrapper[4791]: I0218 00:43:37.776088 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-console-config\") pod \"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a\" (UID: \"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a\") " Feb 18 00:43:37 crc kubenswrapper[4791]: I0218 00:43:37.776124 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-console-serving-cert\") pod \"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a\" (UID: \"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a\") " Feb 18 00:43:37 crc kubenswrapper[4791]: I0218 00:43:37.776232 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-console-oauth-config\") pod \"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a\" (UID: \"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a\") " Feb 18 00:43:37 crc kubenswrapper[4791]: I0218 00:43:37.776271 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-service-ca\") pod \"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a\" (UID: \"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a\") " Feb 18 00:43:37 crc kubenswrapper[4791]: I0218 00:43:37.776356 4791 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-oauth-serving-cert\") pod \"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a\" (UID: \"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a\") " Feb 18 00:43:37 crc kubenswrapper[4791]: I0218 00:43:37.777769 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-console-config" (OuterVolumeSpecName: "console-config") pod "91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a" (UID: "91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:43:37 crc kubenswrapper[4791]: I0218 00:43:37.777821 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-service-ca" (OuterVolumeSpecName: "service-ca") pod "91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a" (UID: "91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:43:37 crc kubenswrapper[4791]: I0218 00:43:37.778285 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a" (UID: "91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:43:37 crc kubenswrapper[4791]: I0218 00:43:37.778739 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a" (UID: "91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:43:37 crc kubenswrapper[4791]: I0218 00:43:37.783267 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a" (UID: "91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:43:37 crc kubenswrapper[4791]: I0218 00:43:37.783684 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-kube-api-access-zsxbq" (OuterVolumeSpecName: "kube-api-access-zsxbq") pod "91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a" (UID: "91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a"). InnerVolumeSpecName "kube-api-access-zsxbq". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:43:37 crc kubenswrapper[4791]: I0218 00:43:37.783874 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a" (UID: "91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:43:37 crc kubenswrapper[4791]: I0218 00:43:37.878468 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zsxbq\" (UniqueName: \"kubernetes.io/projected/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-kube-api-access-zsxbq\") on node \"crc\" DevicePath \"\"" Feb 18 00:43:37 crc kubenswrapper[4791]: I0218 00:43:37.878510 4791 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:43:37 crc kubenswrapper[4791]: I0218 00:43:37.878519 4791 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-console-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:43:37 crc kubenswrapper[4791]: I0218 00:43:37.878528 4791 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-console-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:43:37 crc kubenswrapper[4791]: I0218 00:43:37.878537 4791 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-console-oauth-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:43:37 crc kubenswrapper[4791]: I0218 00:43:37.878546 4791 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-service-ca\") on node \"crc\" DevicePath \"\"" Feb 18 00:43:37 crc kubenswrapper[4791]: I0218 00:43:37.878554 4791 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:43:38 crc kubenswrapper[4791]: I0218 00:43:38.373263 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-58bb59cdb7-pw8s6_91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a/console/0.log" Feb 18 00:43:38 crc kubenswrapper[4791]: I0218 00:43:38.373332 4791 generic.go:334] "Generic (PLEG): container finished" podID="91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a" containerID="700527f60eadc097c92af3074024ceac77c66022764d0d3bc42aafdb0905eeb1" exitCode=2 Feb 18 00:43:38 crc kubenswrapper[4791]: I0218 00:43:38.373371 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-58bb59cdb7-pw8s6" event={"ID":"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a","Type":"ContainerDied","Data":"700527f60eadc097c92af3074024ceac77c66022764d0d3bc42aafdb0905eeb1"} Feb 18 00:43:38 crc kubenswrapper[4791]: I0218 00:43:38.373490 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-58bb59cdb7-pw8s6" event={"ID":"91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a","Type":"ContainerDied","Data":"4422661a270e8cc29818025e3a18c12a24c601ab9e8f9a6c948555ed4f1deb9f"} Feb 18 00:43:38 crc kubenswrapper[4791]: I0218 00:43:38.373501 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-58bb59cdb7-pw8s6" Feb 18 00:43:38 crc kubenswrapper[4791]: I0218 00:43:38.373516 4791 scope.go:117] "RemoveContainer" containerID="700527f60eadc097c92af3074024ceac77c66022764d0d3bc42aafdb0905eeb1" Feb 18 00:43:38 crc kubenswrapper[4791]: I0218 00:43:38.395749 4791 scope.go:117] "RemoveContainer" containerID="700527f60eadc097c92af3074024ceac77c66022764d0d3bc42aafdb0905eeb1" Feb 18 00:43:38 crc kubenswrapper[4791]: E0218 00:43:38.396172 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"700527f60eadc097c92af3074024ceac77c66022764d0d3bc42aafdb0905eeb1\": container with ID starting with 700527f60eadc097c92af3074024ceac77c66022764d0d3bc42aafdb0905eeb1 not found: ID does not exist" containerID="700527f60eadc097c92af3074024ceac77c66022764d0d3bc42aafdb0905eeb1" Feb 18 00:43:38 crc kubenswrapper[4791]: I0218 00:43:38.396215 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"700527f60eadc097c92af3074024ceac77c66022764d0d3bc42aafdb0905eeb1"} err="failed to get container status \"700527f60eadc097c92af3074024ceac77c66022764d0d3bc42aafdb0905eeb1\": rpc error: code = NotFound desc = could not find container \"700527f60eadc097c92af3074024ceac77c66022764d0d3bc42aafdb0905eeb1\": container with ID starting with 700527f60eadc097c92af3074024ceac77c66022764d0d3bc42aafdb0905eeb1 not found: ID does not exist" Feb 18 00:43:38 crc kubenswrapper[4791]: I0218 00:43:38.416410 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-58bb59cdb7-pw8s6"] Feb 18 00:43:38 crc kubenswrapper[4791]: I0218 00:43:38.421546 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-58bb59cdb7-pw8s6"] Feb 18 00:43:39 crc kubenswrapper[4791]: I0218 00:43:39.068821 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a" path="/var/lib/kubelet/pods/91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a/volumes" Feb 18 00:43:56 crc kubenswrapper[4791]: I0218 00:43:56.800576 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 00:43:56 crc kubenswrapper[4791]: I0218 00:43:56.801185 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 00:44:26 crc kubenswrapper[4791]: I0218 00:44:26.800245 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 00:44:26 crc kubenswrapper[4791]: I0218 00:44:26.800889 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: 
connection refused" Feb 18 00:44:56 crc kubenswrapper[4791]: I0218 00:44:56.800286 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 00:44:56 crc kubenswrapper[4791]: I0218 00:44:56.800767 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 00:44:56 crc kubenswrapper[4791]: I0218 00:44:56.800816 4791 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" Feb 18 00:44:56 crc kubenswrapper[4791]: I0218 00:44:56.801506 4791 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c4a571b8696e216a6a4c1ca3777a5f8ef873faefd4bf24ea60bd568eedd65c7c"} pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 18 00:44:56 crc kubenswrapper[4791]: I0218 00:44:56.801566 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" containerID="cri-o://c4a571b8696e216a6a4c1ca3777a5f8ef873faefd4bf24ea60bd568eedd65c7c" gracePeriod=600 Feb 18 00:44:57 crc kubenswrapper[4791]: I0218 00:44:57.105671 4791 generic.go:334] "Generic (PLEG): container finished" podID="0b31a333-8f95-459c-8135-e91e557c4c85" containerID="c4a571b8696e216a6a4c1ca3777a5f8ef873faefd4bf24ea60bd568eedd65c7c" exitCode=0 Feb 18 00:44:57 crc kubenswrapper[4791]: I0218 00:44:57.105720 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerDied","Data":"c4a571b8696e216a6a4c1ca3777a5f8ef873faefd4bf24ea60bd568eedd65c7c"} Feb 18 00:44:57 crc kubenswrapper[4791]: I0218 00:44:57.105771 4791 scope.go:117] "RemoveContainer" containerID="6355d2e0663f057b89b7f75a0d7f3be1bf01198676dbca6743c4cddbb8aa160f" Feb 18 00:44:58 crc kubenswrapper[4791]: I0218 00:44:58.116389 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerStarted","Data":"321face9bfc6a821bbf92e76b694e43c887bc9e42886417d5315477f36b19cca"} Feb 18 00:45:00 crc kubenswrapper[4791]: I0218 00:45:00.209756 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29522925-xkcgp"] Feb 18 00:45:00 crc kubenswrapper[4791]: E0218 00:45:00.210308 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a" containerName="console" Feb 18 00:45:00 crc kubenswrapper[4791]: I0218 00:45:00.210324 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a" containerName="console" Feb 18 00:45:00 crc kubenswrapper[4791]: I0218 00:45:00.210455 4791 
memory_manager.go:354] "RemoveStaleState removing state" podUID="91431e3a-c5e7-4dd9-96b7-f7fcbfebd06a" containerName="console" Feb 18 00:45:00 crc kubenswrapper[4791]: I0218 00:45:00.211104 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29522925-xkcgp" Feb 18 00:45:00 crc kubenswrapper[4791]: I0218 00:45:00.212972 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Feb 18 00:45:00 crc kubenswrapper[4791]: I0218 00:45:00.214466 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Feb 18 00:45:00 crc kubenswrapper[4791]: I0218 00:45:00.215627 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29522925-xkcgp"] Feb 18 00:45:00 crc kubenswrapper[4791]: I0218 00:45:00.333345 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bc9nq\" (UniqueName: \"kubernetes.io/projected/306b19b9-f9da-46ef-b791-32fa75a9e66d-kube-api-access-bc9nq\") pod \"collect-profiles-29522925-xkcgp\" (UID: \"306b19b9-f9da-46ef-b791-32fa75a9e66d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522925-xkcgp" Feb 18 00:45:00 crc kubenswrapper[4791]: I0218 00:45:00.333417 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/306b19b9-f9da-46ef-b791-32fa75a9e66d-config-volume\") pod \"collect-profiles-29522925-xkcgp\" (UID: \"306b19b9-f9da-46ef-b791-32fa75a9e66d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522925-xkcgp" Feb 18 00:45:00 crc kubenswrapper[4791]: I0218 00:45:00.333456 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/306b19b9-f9da-46ef-b791-32fa75a9e66d-secret-volume\") pod \"collect-profiles-29522925-xkcgp\" (UID: \"306b19b9-f9da-46ef-b791-32fa75a9e66d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522925-xkcgp" Feb 18 00:45:00 crc kubenswrapper[4791]: I0218 00:45:00.435029 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/306b19b9-f9da-46ef-b791-32fa75a9e66d-secret-volume\") pod \"collect-profiles-29522925-xkcgp\" (UID: \"306b19b9-f9da-46ef-b791-32fa75a9e66d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522925-xkcgp" Feb 18 00:45:00 crc kubenswrapper[4791]: I0218 00:45:00.435169 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bc9nq\" (UniqueName: \"kubernetes.io/projected/306b19b9-f9da-46ef-b791-32fa75a9e66d-kube-api-access-bc9nq\") pod \"collect-profiles-29522925-xkcgp\" (UID: \"306b19b9-f9da-46ef-b791-32fa75a9e66d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522925-xkcgp" Feb 18 00:45:00 crc kubenswrapper[4791]: I0218 00:45:00.435207 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/306b19b9-f9da-46ef-b791-32fa75a9e66d-config-volume\") pod \"collect-profiles-29522925-xkcgp\" (UID: \"306b19b9-f9da-46ef-b791-32fa75a9e66d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522925-xkcgp" Feb 18 
00:45:00 crc kubenswrapper[4791]: I0218 00:45:00.436001 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/306b19b9-f9da-46ef-b791-32fa75a9e66d-config-volume\") pod \"collect-profiles-29522925-xkcgp\" (UID: \"306b19b9-f9da-46ef-b791-32fa75a9e66d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522925-xkcgp" Feb 18 00:45:00 crc kubenswrapper[4791]: I0218 00:45:00.449993 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/306b19b9-f9da-46ef-b791-32fa75a9e66d-secret-volume\") pod \"collect-profiles-29522925-xkcgp\" (UID: \"306b19b9-f9da-46ef-b791-32fa75a9e66d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522925-xkcgp" Feb 18 00:45:00 crc kubenswrapper[4791]: I0218 00:45:00.458883 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bc9nq\" (UniqueName: \"kubernetes.io/projected/306b19b9-f9da-46ef-b791-32fa75a9e66d-kube-api-access-bc9nq\") pod \"collect-profiles-29522925-xkcgp\" (UID: \"306b19b9-f9da-46ef-b791-32fa75a9e66d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522925-xkcgp" Feb 18 00:45:00 crc kubenswrapper[4791]: I0218 00:45:00.540642 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29522925-xkcgp" Feb 18 00:45:00 crc kubenswrapper[4791]: I0218 00:45:00.709461 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29522925-xkcgp"] Feb 18 00:45:01 crc kubenswrapper[4791]: I0218 00:45:01.137009 4791 generic.go:334] "Generic (PLEG): container finished" podID="306b19b9-f9da-46ef-b791-32fa75a9e66d" containerID="6e840fe0a4351f09c2d46c9a0e057f7caa293f3229e78604dfc2dbe0cf0b908f" exitCode=0 Feb 18 00:45:01 crc kubenswrapper[4791]: I0218 00:45:01.137224 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29522925-xkcgp" event={"ID":"306b19b9-f9da-46ef-b791-32fa75a9e66d","Type":"ContainerDied","Data":"6e840fe0a4351f09c2d46c9a0e057f7caa293f3229e78604dfc2dbe0cf0b908f"} Feb 18 00:45:01 crc kubenswrapper[4791]: I0218 00:45:01.137302 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29522925-xkcgp" event={"ID":"306b19b9-f9da-46ef-b791-32fa75a9e66d","Type":"ContainerStarted","Data":"6b17fd31a043274e3b0700a52868c5cd142f7b39d301e0dac11e68a14a4f993f"} Feb 18 00:45:02 crc kubenswrapper[4791]: I0218 00:45:02.400806 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29522925-xkcgp" Feb 18 00:45:02 crc kubenswrapper[4791]: I0218 00:45:02.462601 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bc9nq\" (UniqueName: \"kubernetes.io/projected/306b19b9-f9da-46ef-b791-32fa75a9e66d-kube-api-access-bc9nq\") pod \"306b19b9-f9da-46ef-b791-32fa75a9e66d\" (UID: \"306b19b9-f9da-46ef-b791-32fa75a9e66d\") " Feb 18 00:45:02 crc kubenswrapper[4791]: I0218 00:45:02.462795 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/306b19b9-f9da-46ef-b791-32fa75a9e66d-secret-volume\") pod \"306b19b9-f9da-46ef-b791-32fa75a9e66d\" (UID: \"306b19b9-f9da-46ef-b791-32fa75a9e66d\") " Feb 18 00:45:02 crc kubenswrapper[4791]: I0218 00:45:02.462860 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/306b19b9-f9da-46ef-b791-32fa75a9e66d-config-volume\") pod \"306b19b9-f9da-46ef-b791-32fa75a9e66d\" (UID: \"306b19b9-f9da-46ef-b791-32fa75a9e66d\") " Feb 18 00:45:02 crc kubenswrapper[4791]: I0218 00:45:02.463515 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/306b19b9-f9da-46ef-b791-32fa75a9e66d-config-volume" (OuterVolumeSpecName: "config-volume") pod "306b19b9-f9da-46ef-b791-32fa75a9e66d" (UID: "306b19b9-f9da-46ef-b791-32fa75a9e66d"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:45:02 crc kubenswrapper[4791]: I0218 00:45:02.467403 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/306b19b9-f9da-46ef-b791-32fa75a9e66d-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "306b19b9-f9da-46ef-b791-32fa75a9e66d" (UID: "306b19b9-f9da-46ef-b791-32fa75a9e66d"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:45:02 crc kubenswrapper[4791]: I0218 00:45:02.467808 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/306b19b9-f9da-46ef-b791-32fa75a9e66d-kube-api-access-bc9nq" (OuterVolumeSpecName: "kube-api-access-bc9nq") pod "306b19b9-f9da-46ef-b791-32fa75a9e66d" (UID: "306b19b9-f9da-46ef-b791-32fa75a9e66d"). InnerVolumeSpecName "kube-api-access-bc9nq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:45:02 crc kubenswrapper[4791]: I0218 00:45:02.563821 4791 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/306b19b9-f9da-46ef-b791-32fa75a9e66d-secret-volume\") on node \"crc\" DevicePath \"\"" Feb 18 00:45:02 crc kubenswrapper[4791]: I0218 00:45:02.563857 4791 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/306b19b9-f9da-46ef-b791-32fa75a9e66d-config-volume\") on node \"crc\" DevicePath \"\"" Feb 18 00:45:02 crc kubenswrapper[4791]: I0218 00:45:02.563867 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bc9nq\" (UniqueName: \"kubernetes.io/projected/306b19b9-f9da-46ef-b791-32fa75a9e66d-kube-api-access-bc9nq\") on node \"crc\" DevicePath \"\"" Feb 18 00:45:03 crc kubenswrapper[4791]: I0218 00:45:03.150373 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29522925-xkcgp" event={"ID":"306b19b9-f9da-46ef-b791-32fa75a9e66d","Type":"ContainerDied","Data":"6b17fd31a043274e3b0700a52868c5cd142f7b39d301e0dac11e68a14a4f993f"} Feb 18 00:45:03 crc kubenswrapper[4791]: I0218 00:45:03.150423 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6b17fd31a043274e3b0700a52868c5cd142f7b39d301e0dac11e68a14a4f993f" Feb 18 00:45:03 crc kubenswrapper[4791]: I0218 00:45:03.150754 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29522925-xkcgp" Feb 18 00:45:36 crc kubenswrapper[4791]: I0218 00:45:36.034485 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvkrs"] Feb 18 00:45:36 crc kubenswrapper[4791]: E0218 00:45:36.035328 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="306b19b9-f9da-46ef-b791-32fa75a9e66d" containerName="collect-profiles" Feb 18 00:45:36 crc kubenswrapper[4791]: I0218 00:45:36.035344 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="306b19b9-f9da-46ef-b791-32fa75a9e66d" containerName="collect-profiles" Feb 18 00:45:36 crc kubenswrapper[4791]: I0218 00:45:36.035473 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="306b19b9-f9da-46ef-b791-32fa75a9e66d" containerName="collect-profiles" Feb 18 00:45:36 crc kubenswrapper[4791]: I0218 00:45:36.036622 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvkrs" Feb 18 00:45:36 crc kubenswrapper[4791]: I0218 00:45:36.040427 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Feb 18 00:45:36 crc kubenswrapper[4791]: I0218 00:45:36.042091 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvkrs"] Feb 18 00:45:36 crc kubenswrapper[4791]: I0218 00:45:36.128470 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/83933a55-a803-449b-8b3c-57ea3692f403-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvkrs\" (UID: \"83933a55-a803-449b-8b3c-57ea3692f403\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvkrs" Feb 18 00:45:36 crc kubenswrapper[4791]: I0218 00:45:36.128595 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/83933a55-a803-449b-8b3c-57ea3692f403-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvkrs\" (UID: \"83933a55-a803-449b-8b3c-57ea3692f403\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvkrs" Feb 18 00:45:36 crc kubenswrapper[4791]: I0218 00:45:36.128630 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rnmf8\" (UniqueName: \"kubernetes.io/projected/83933a55-a803-449b-8b3c-57ea3692f403-kube-api-access-rnmf8\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvkrs\" (UID: \"83933a55-a803-449b-8b3c-57ea3692f403\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvkrs" Feb 18 00:45:36 crc kubenswrapper[4791]: I0218 00:45:36.229269 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/83933a55-a803-449b-8b3c-57ea3692f403-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvkrs\" (UID: \"83933a55-a803-449b-8b3c-57ea3692f403\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvkrs" Feb 18 00:45:36 crc kubenswrapper[4791]: I0218 00:45:36.229946 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rnmf8\" (UniqueName: \"kubernetes.io/projected/83933a55-a803-449b-8b3c-57ea3692f403-kube-api-access-rnmf8\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvkrs\" (UID: \"83933a55-a803-449b-8b3c-57ea3692f403\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvkrs" Feb 18 00:45:36 crc kubenswrapper[4791]: I0218 00:45:36.229895 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/83933a55-a803-449b-8b3c-57ea3692f403-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvkrs\" (UID: \"83933a55-a803-449b-8b3c-57ea3692f403\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvkrs" Feb 18 00:45:36 crc kubenswrapper[4791]: I0218 00:45:36.230506 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/83933a55-a803-449b-8b3c-57ea3692f403-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvkrs\" (UID: \"83933a55-a803-449b-8b3c-57ea3692f403\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvkrs" Feb 18 00:45:36 crc kubenswrapper[4791]: I0218 00:45:36.230825 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/83933a55-a803-449b-8b3c-57ea3692f403-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvkrs\" (UID: \"83933a55-a803-449b-8b3c-57ea3692f403\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvkrs" Feb 18 00:45:36 crc kubenswrapper[4791]: I0218 00:45:36.248176 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rnmf8\" (UniqueName: \"kubernetes.io/projected/83933a55-a803-449b-8b3c-57ea3692f403-kube-api-access-rnmf8\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvkrs\" (UID: \"83933a55-a803-449b-8b3c-57ea3692f403\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvkrs" Feb 18 00:45:36 crc kubenswrapper[4791]: I0218 00:45:36.353958 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvkrs" Feb 18 00:45:36 crc kubenswrapper[4791]: I0218 00:45:36.580279 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvkrs"] Feb 18 00:45:37 crc kubenswrapper[4791]: I0218 00:45:37.361460 4791 generic.go:334] "Generic (PLEG): container finished" podID="83933a55-a803-449b-8b3c-57ea3692f403" containerID="33996735cd2d0ba504ecbc9466241d7696b17b3fa5ab2e5c483841fb7f6bfe91" exitCode=0 Feb 18 00:45:37 crc kubenswrapper[4791]: I0218 00:45:37.361541 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvkrs" event={"ID":"83933a55-a803-449b-8b3c-57ea3692f403","Type":"ContainerDied","Data":"33996735cd2d0ba504ecbc9466241d7696b17b3fa5ab2e5c483841fb7f6bfe91"} Feb 18 00:45:37 crc kubenswrapper[4791]: I0218 00:45:37.361719 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvkrs" event={"ID":"83933a55-a803-449b-8b3c-57ea3692f403","Type":"ContainerStarted","Data":"987c3700c1f7adb527a1657a98c0fa197b7898741b841b8a0abe7543a25123fa"} Feb 18 00:45:37 crc kubenswrapper[4791]: I0218 00:45:37.363612 4791 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 18 00:45:39 crc kubenswrapper[4791]: I0218 00:45:39.378306 4791 generic.go:334] "Generic (PLEG): container finished" podID="83933a55-a803-449b-8b3c-57ea3692f403" containerID="211fa30a49c66e0a01ea0a3fac630a2587f1c3273ca0b7361b5161eafb00bd34" exitCode=0 Feb 18 00:45:39 crc kubenswrapper[4791]: I0218 00:45:39.378363 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvkrs" event={"ID":"83933a55-a803-449b-8b3c-57ea3692f403","Type":"ContainerDied","Data":"211fa30a49c66e0a01ea0a3fac630a2587f1c3273ca0b7361b5161eafb00bd34"} Feb 18 00:45:40 crc kubenswrapper[4791]: I0218 00:45:40.387920 4791 generic.go:334] "Generic (PLEG): container finished" 
podID="83933a55-a803-449b-8b3c-57ea3692f403" containerID="0413cc712fdb0861a2a7b91dcc5097000d9f7a1759fab325cfd0410c41b5b613" exitCode=0 Feb 18 00:45:40 crc kubenswrapper[4791]: I0218 00:45:40.387983 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvkrs" event={"ID":"83933a55-a803-449b-8b3c-57ea3692f403","Type":"ContainerDied","Data":"0413cc712fdb0861a2a7b91dcc5097000d9f7a1759fab325cfd0410c41b5b613"} Feb 18 00:45:41 crc kubenswrapper[4791]: I0218 00:45:41.626767 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvkrs" Feb 18 00:45:41 crc kubenswrapper[4791]: I0218 00:45:41.708509 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/83933a55-a803-449b-8b3c-57ea3692f403-util\") pod \"83933a55-a803-449b-8b3c-57ea3692f403\" (UID: \"83933a55-a803-449b-8b3c-57ea3692f403\") " Feb 18 00:45:41 crc kubenswrapper[4791]: I0218 00:45:41.708562 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/83933a55-a803-449b-8b3c-57ea3692f403-bundle\") pod \"83933a55-a803-449b-8b3c-57ea3692f403\" (UID: \"83933a55-a803-449b-8b3c-57ea3692f403\") " Feb 18 00:45:41 crc kubenswrapper[4791]: I0218 00:45:41.708627 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnmf8\" (UniqueName: \"kubernetes.io/projected/83933a55-a803-449b-8b3c-57ea3692f403-kube-api-access-rnmf8\") pod \"83933a55-a803-449b-8b3c-57ea3692f403\" (UID: \"83933a55-a803-449b-8b3c-57ea3692f403\") " Feb 18 00:45:41 crc kubenswrapper[4791]: I0218 00:45:41.711827 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/83933a55-a803-449b-8b3c-57ea3692f403-bundle" (OuterVolumeSpecName: "bundle") pod "83933a55-a803-449b-8b3c-57ea3692f403" (UID: "83933a55-a803-449b-8b3c-57ea3692f403"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:45:41 crc kubenswrapper[4791]: I0218 00:45:41.715333 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83933a55-a803-449b-8b3c-57ea3692f403-kube-api-access-rnmf8" (OuterVolumeSpecName: "kube-api-access-rnmf8") pod "83933a55-a803-449b-8b3c-57ea3692f403" (UID: "83933a55-a803-449b-8b3c-57ea3692f403"). InnerVolumeSpecName "kube-api-access-rnmf8". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:45:41 crc kubenswrapper[4791]: I0218 00:45:41.723933 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/83933a55-a803-449b-8b3c-57ea3692f403-util" (OuterVolumeSpecName: "util") pod "83933a55-a803-449b-8b3c-57ea3692f403" (UID: "83933a55-a803-449b-8b3c-57ea3692f403"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:45:41 crc kubenswrapper[4791]: I0218 00:45:41.809985 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnmf8\" (UniqueName: \"kubernetes.io/projected/83933a55-a803-449b-8b3c-57ea3692f403-kube-api-access-rnmf8\") on node \"crc\" DevicePath \"\"" Feb 18 00:45:41 crc kubenswrapper[4791]: I0218 00:45:41.810230 4791 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/83933a55-a803-449b-8b3c-57ea3692f403-util\") on node \"crc\" DevicePath \"\"" Feb 18 00:45:41 crc kubenswrapper[4791]: I0218 00:45:41.810320 4791 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/83933a55-a803-449b-8b3c-57ea3692f403-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:45:42 crc kubenswrapper[4791]: I0218 00:45:42.402256 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvkrs" event={"ID":"83933a55-a803-449b-8b3c-57ea3692f403","Type":"ContainerDied","Data":"987c3700c1f7adb527a1657a98c0fa197b7898741b841b8a0abe7543a25123fa"} Feb 18 00:45:42 crc kubenswrapper[4791]: I0218 00:45:42.402304 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="987c3700c1f7adb527a1657a98c0fa197b7898741b841b8a0abe7543a25123fa" Feb 18 00:45:42 crc kubenswrapper[4791]: I0218 00:45:42.402346 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvkrs" Feb 18 00:45:47 crc kubenswrapper[4791]: I0218 00:45:47.423867 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-tr5hg"] Feb 18 00:45:47 crc kubenswrapper[4791]: I0218 00:45:47.425940 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="ovn-controller" containerID="cri-o://4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c" gracePeriod=30 Feb 18 00:45:47 crc kubenswrapper[4791]: I0218 00:45:47.426041 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="nbdb" containerID="cri-o://097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee" gracePeriod=30 Feb 18 00:45:47 crc kubenswrapper[4791]: I0218 00:45:47.426024 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="sbdb" containerID="cri-o://5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea" gracePeriod=30 Feb 18 00:45:47 crc kubenswrapper[4791]: I0218 00:45:47.426172 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="kube-rbac-proxy-node" containerID="cri-o://73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9" gracePeriod=30 Feb 18 00:45:47 crc kubenswrapper[4791]: I0218 00:45:47.425995 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="kube-rbac-proxy-ovn-metrics" 
containerID="cri-o://3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a" gracePeriod=30 Feb 18 00:45:47 crc kubenswrapper[4791]: I0218 00:45:47.426200 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="northd" containerID="cri-o://31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047" gracePeriod=30 Feb 18 00:45:47 crc kubenswrapper[4791]: I0218 00:45:47.426348 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="ovn-acl-logging" containerID="cri-o://73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269" gracePeriod=30 Feb 18 00:45:47 crc kubenswrapper[4791]: I0218 00:45:47.467032 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="ovnkube-controller" containerID="cri-o://30985d2c2ef7798a054a00074126dfc336988e91f8bb27449f3fb3b29c0c6380" gracePeriod=30 Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.449514 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tr5hg_3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1/ovnkube-controller/3.log" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.452439 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tr5hg_3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1/ovn-acl-logging/0.log" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.452929 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tr5hg_3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1/ovn-controller/0.log" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.453466 4791 generic.go:334] "Generic (PLEG): container finished" podID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerID="30985d2c2ef7798a054a00074126dfc336988e91f8bb27449f3fb3b29c0c6380" exitCode=0 Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.453493 4791 generic.go:334] "Generic (PLEG): container finished" podID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerID="5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea" exitCode=0 Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.453501 4791 generic.go:334] "Generic (PLEG): container finished" podID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerID="097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee" exitCode=0 Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.453510 4791 generic.go:334] "Generic (PLEG): container finished" podID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerID="31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047" exitCode=0 Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.453516 4791 generic.go:334] "Generic (PLEG): container finished" podID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerID="73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269" exitCode=143 Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.453524 4791 generic.go:334] "Generic (PLEG): container finished" podID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerID="4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c" exitCode=143 Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.453587 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" event={"ID":"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1","Type":"ContainerDied","Data":"30985d2c2ef7798a054a00074126dfc336988e91f8bb27449f3fb3b29c0c6380"} Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.453613 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" event={"ID":"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1","Type":"ContainerDied","Data":"5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea"} Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.453625 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" event={"ID":"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1","Type":"ContainerDied","Data":"097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee"} Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.453635 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" event={"ID":"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1","Type":"ContainerDied","Data":"31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047"} Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.453644 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" event={"ID":"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1","Type":"ContainerDied","Data":"73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269"} Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.453654 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" event={"ID":"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1","Type":"ContainerDied","Data":"4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c"} Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.453669 4791 scope.go:117] "RemoveContainer" containerID="cc0c28173c82f93e7859b1e377355478f6d13cfa268519e0d647485149938264" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.456081 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-d2kpn_83bdb769-59eb-4472-ba08-be5897ee2cd6/kube-multus/2.log" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.456475 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-d2kpn_83bdb769-59eb-4472-ba08-be5897ee2cd6/kube-multus/1.log" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.456559 4791 generic.go:334] "Generic (PLEG): container finished" podID="83bdb769-59eb-4472-ba08-be5897ee2cd6" containerID="fad5ddd370dd4141f0c92d01554dafc2561baba1cf4e086c3add33e0923068b0" exitCode=2 Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.456630 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-d2kpn" event={"ID":"83bdb769-59eb-4472-ba08-be5897ee2cd6","Type":"ContainerDied","Data":"fad5ddd370dd4141f0c92d01554dafc2561baba1cf4e086c3add33e0923068b0"} Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.457127 4791 scope.go:117] "RemoveContainer" containerID="fad5ddd370dd4141f0c92d01554dafc2561baba1cf4e086c3add33e0923068b0" Feb 18 00:45:48 crc kubenswrapper[4791]: E0218 00:45:48.457690 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-d2kpn_openshift-multus(83bdb769-59eb-4472-ba08-be5897ee2cd6)\"" pod="openshift-multus/multus-d2kpn" podUID="83bdb769-59eb-4472-ba08-be5897ee2cd6" Feb 18 00:45:48 crc 
kubenswrapper[4791]: I0218 00:45:48.474233 4791 scope.go:117] "RemoveContainer" containerID="63aa39cbb146d0f470c77f1b8ae9dd29616913e05a79de889b8e684b40621099" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.665517 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tr5hg_3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1/ovn-acl-logging/0.log" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.666302 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tr5hg_3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1/ovn-controller/0.log" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.666740 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.797275 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-run-systemd\") pod \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.797588 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-var-lib-cni-networks-ovn-kubernetes\") pod \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.797672 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-run-openvswitch\") pod \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.797738 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-node-log\") pod \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.797799 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-kubelet\") pod \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.797876 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-ovnkube-config\") pod \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.797955 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-run-netns\") pod \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.798016 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: 
\"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-run-ovn-kubernetes\") pod \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.798091 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-slash\") pod \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.798188 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-run-ovn\") pod \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.798362 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-log-socket\") pod \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.798426 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-etc-openvswitch\") pod \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.798485 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-ovnkube-script-lib\") pod \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.798554 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-var-lib-openvswitch\") pod \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.798626 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xql4c\" (UniqueName: \"kubernetes.io/projected/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-kube-api-access-xql4c\") pod \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.798695 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-env-overrides\") pod \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.798765 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-ovn-node-metrics-cert\") pod \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.798843 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" 
(UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-cni-bin\") pod \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.798947 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-systemd-units\") pod \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.799055 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-cni-netd\") pod \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\" (UID: \"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1\") " Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.798191 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" (UID: "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.798226 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" (UID: "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.798249 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" (UID: "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.798268 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-node-log" (OuterVolumeSpecName: "node-log") pod "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" (UID: "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.798294 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" (UID: "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.798718 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" (UID: "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1"). InnerVolumeSpecName "ovnkube-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.798750 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-log-socket" (OuterVolumeSpecName: "log-socket") pod "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" (UID: "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.798768 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" (UID: "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.798786 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-slash" (OuterVolumeSpecName: "host-slash") pod "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" (UID: "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.798802 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" (UID: "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.798824 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" (UID: "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.798840 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" (UID: "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.799118 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" (UID: "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.799461 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" (UID: "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1"). InnerVolumeSpecName "host-cni-netd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.799579 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" (UID: "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.800280 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" (UID: "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.800329 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" (UID: "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.809126 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-kube-api-access-xql4c" (OuterVolumeSpecName: "kube-api-access-xql4c") pod "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" (UID: "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1"). InnerVolumeSpecName "kube-api-access-xql4c". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.817127 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" (UID: "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1"). InnerVolumeSpecName "ovn-node-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.820358 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-rgsmz"] Feb 18 00:45:48 crc kubenswrapper[4791]: E0218 00:45:48.820610 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="ovnkube-controller" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.820626 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="ovnkube-controller" Feb 18 00:45:48 crc kubenswrapper[4791]: E0218 00:45:48.820639 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83933a55-a803-449b-8b3c-57ea3692f403" containerName="util" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.820648 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="83933a55-a803-449b-8b3c-57ea3692f403" containerName="util" Feb 18 00:45:48 crc kubenswrapper[4791]: E0218 00:45:48.820659 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="sbdb" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.820667 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="sbdb" Feb 18 00:45:48 crc kubenswrapper[4791]: E0218 00:45:48.820682 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="ovnkube-controller" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.820690 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="ovnkube-controller" Feb 18 00:45:48 crc kubenswrapper[4791]: E0218 00:45:48.820700 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="nbdb" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.820706 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="nbdb" Feb 18 00:45:48 crc kubenswrapper[4791]: E0218 00:45:48.820714 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="ovn-controller" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.820721 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="ovn-controller" Feb 18 00:45:48 crc kubenswrapper[4791]: E0218 00:45:48.820732 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="kubecfg-setup" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.820740 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="kubecfg-setup" Feb 18 00:45:48 crc kubenswrapper[4791]: E0218 00:45:48.820749 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="ovnkube-controller" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.820756 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="ovnkube-controller" Feb 18 00:45:48 crc kubenswrapper[4791]: E0218 00:45:48.820764 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" 
containerName="kube-rbac-proxy-ovn-metrics" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.820772 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="kube-rbac-proxy-ovn-metrics" Feb 18 00:45:48 crc kubenswrapper[4791]: E0218 00:45:48.820784 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="northd" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.820792 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="northd" Feb 18 00:45:48 crc kubenswrapper[4791]: E0218 00:45:48.820804 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="ovnkube-controller" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.820811 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="ovnkube-controller" Feb 18 00:45:48 crc kubenswrapper[4791]: E0218 00:45:48.820822 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83933a55-a803-449b-8b3c-57ea3692f403" containerName="pull" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.820829 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="83933a55-a803-449b-8b3c-57ea3692f403" containerName="pull" Feb 18 00:45:48 crc kubenswrapper[4791]: E0218 00:45:48.820839 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83933a55-a803-449b-8b3c-57ea3692f403" containerName="extract" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.820847 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="83933a55-a803-449b-8b3c-57ea3692f403" containerName="extract" Feb 18 00:45:48 crc kubenswrapper[4791]: E0218 00:45:48.820854 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="ovn-acl-logging" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.820861 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="ovn-acl-logging" Feb 18 00:45:48 crc kubenswrapper[4791]: E0218 00:45:48.820878 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="kube-rbac-proxy-node" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.820885 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="kube-rbac-proxy-node" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.821046 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="ovn-controller" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.821060 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="83933a55-a803-449b-8b3c-57ea3692f403" containerName="extract" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.821071 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="northd" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.821084 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="nbdb" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.821096 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" 
containerName="ovnkube-controller" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.821104 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="ovnkube-controller" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.821112 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="kube-rbac-proxy-node" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.821461 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="ovnkube-controller" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.821474 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="sbdb" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.821485 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="ovn-acl-logging" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.821497 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="kube-rbac-proxy-ovn-metrics" Feb 18 00:45:48 crc kubenswrapper[4791]: E0218 00:45:48.821655 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="ovnkube-controller" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.821666 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="ovnkube-controller" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.821810 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="ovnkube-controller" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.821820 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerName="ovnkube-controller" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.824752 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.841580 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" (UID: "3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1"). InnerVolumeSpecName "run-systemd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.900756 4791 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.900801 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xql4c\" (UniqueName: \"kubernetes.io/projected/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-kube-api-access-xql4c\") on node \"crc\" DevicePath \"\"" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.900814 4791 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-env-overrides\") on node \"crc\" DevicePath \"\"" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.900827 4791 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.900838 4791 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-cni-bin\") on node \"crc\" DevicePath \"\"" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.900849 4791 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-systemd-units\") on node \"crc\" DevicePath \"\"" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.900859 4791 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-cni-netd\") on node \"crc\" DevicePath \"\"" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.900868 4791 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-run-systemd\") on node \"crc\" DevicePath \"\"" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.900880 4791 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.900891 4791 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-run-openvswitch\") on node \"crc\" DevicePath \"\"" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.900903 4791 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-node-log\") on node \"crc\" DevicePath \"\"" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.900915 4791 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-kubelet\") on node \"crc\" DevicePath \"\"" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.900925 4791 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: 
\"kubernetes.io/configmap/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-ovnkube-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.900935 4791 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-run-netns\") on node \"crc\" DevicePath \"\"" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.900948 4791 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.900958 4791 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-host-slash\") on node \"crc\" DevicePath \"\"" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.900968 4791 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-run-ovn\") on node \"crc\" DevicePath \"\"" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.900977 4791 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-log-socket\") on node \"crc\" DevicePath \"\"" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.900988 4791 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Feb 18 00:45:48 crc kubenswrapper[4791]: I0218 00:45:48.900998 4791 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.002096 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-host-cni-netd\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.002151 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-host-run-netns\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.002323 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-var-lib-openvswitch\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.002367 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-host-slash\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.002396 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ttsdl\" (UniqueName: \"kubernetes.io/projected/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-kube-api-access-ttsdl\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.002432 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-etc-openvswitch\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.002478 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-run-systemd\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.002535 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.002565 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-host-kubelet\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.002618 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-run-openvswitch\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.002651 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-ovn-node-metrics-cert\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.002680 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-node-log\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.002696 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: 
\"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-host-cni-bin\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.002711 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-ovnkube-script-lib\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.002770 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-systemd-units\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.002811 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-run-ovn\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.002921 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-ovnkube-config\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.003071 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-log-socket\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.003107 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-env-overrides\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.003130 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-host-run-ovn-kubernetes\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.105848 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-run-openvswitch\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.105895 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-ovn-node-metrics-cert\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.105915 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-node-log\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.105931 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-host-cni-bin\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.105945 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-ovnkube-script-lib\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.105962 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-systemd-units\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.105978 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-run-ovn\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.105993 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-ovnkube-config\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.105987 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-run-openvswitch\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.106033 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-log-socket\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.106050 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-env-overrides\") pod 
\"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.106064 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-host-run-ovn-kubernetes\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.106088 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-host-cni-netd\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.106105 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-host-run-netns\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.106127 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-var-lib-openvswitch\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.106141 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-host-slash\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.106174 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ttsdl\" (UniqueName: \"kubernetes.io/projected/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-kube-api-access-ttsdl\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.106191 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-etc-openvswitch\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.106214 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-run-systemd\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.106233 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-host-var-lib-cni-networks-ovn-kubernetes\") pod 
\"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.106250 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-host-kubelet\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.106312 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-host-kubelet\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.106347 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-systemd-units\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.106368 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-run-ovn\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.106576 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-node-log\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.106633 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-var-lib-openvswitch\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.106657 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-log-socket\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.106866 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-ovnkube-script-lib\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.106906 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-host-cni-bin\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.106931 4791 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-etc-openvswitch\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.106952 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-host-slash\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.106985 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-ovnkube-config\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.107021 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-run-systemd\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.107033 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-env-overrides\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.107043 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.107067 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-host-cni-netd\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.107069 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-host-run-ovn-kubernetes\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.107089 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-host-run-netns\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.110528 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: 
\"kubernetes.io/secret/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-ovn-node-metrics-cert\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.133129 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ttsdl\" (UniqueName: \"kubernetes.io/projected/94a945c7-1e4c-4b9b-a4d9-627d888e59c9-kube-api-access-ttsdl\") pod \"ovnkube-node-rgsmz\" (UID: \"94a945c7-1e4c-4b9b-a4d9-627d888e59c9\") " pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.160690 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.463796 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-d2kpn_83bdb769-59eb-4472-ba08-be5897ee2cd6/kube-multus/2.log" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.465865 4791 generic.go:334] "Generic (PLEG): container finished" podID="94a945c7-1e4c-4b9b-a4d9-627d888e59c9" containerID="4eaebb099c0245d79e0b3572d6e8d767c4c6685b9fd30e4a89235184a7cecaa9" exitCode=0 Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.465921 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" event={"ID":"94a945c7-1e4c-4b9b-a4d9-627d888e59c9","Type":"ContainerDied","Data":"4eaebb099c0245d79e0b3572d6e8d767c4c6685b9fd30e4a89235184a7cecaa9"} Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.465948 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" event={"ID":"94a945c7-1e4c-4b9b-a4d9-627d888e59c9","Type":"ContainerStarted","Data":"c0e2bdb8035d359fd4d4649aef7eb1088c096452ba7769f68b96e170ce35f531"} Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.485239 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tr5hg_3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1/ovn-acl-logging/0.log" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.485727 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tr5hg_3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1/ovn-controller/0.log" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.486035 4791 generic.go:334] "Generic (PLEG): container finished" podID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerID="3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a" exitCode=0 Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.486059 4791 generic.go:334] "Generic (PLEG): container finished" podID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" containerID="73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9" exitCode=0 Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.486087 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" event={"ID":"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1","Type":"ContainerDied","Data":"3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a"} Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.486135 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" event={"ID":"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1","Type":"ContainerDied","Data":"73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9"} Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 
00:45:49.486147 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" event={"ID":"3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1","Type":"ContainerDied","Data":"5457055bb43f94330ff5781a028bde94b56febcde31f08807ade6fe3fee37ea0"} Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.486181 4791 scope.go:117] "RemoveContainer" containerID="30985d2c2ef7798a054a00074126dfc336988e91f8bb27449f3fb3b29c0c6380" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.486321 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-tr5hg" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.504167 4791 scope.go:117] "RemoveContainer" containerID="5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.516553 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-tr5hg"] Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.522611 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-tr5hg"] Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.561374 4791 scope.go:117] "RemoveContainer" containerID="097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.591435 4791 scope.go:117] "RemoveContainer" containerID="31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.609737 4791 scope.go:117] "RemoveContainer" containerID="3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.627993 4791 scope.go:117] "RemoveContainer" containerID="73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.662101 4791 scope.go:117] "RemoveContainer" containerID="73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.677456 4791 scope.go:117] "RemoveContainer" containerID="4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.697362 4791 scope.go:117] "RemoveContainer" containerID="4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.723781 4791 scope.go:117] "RemoveContainer" containerID="30985d2c2ef7798a054a00074126dfc336988e91f8bb27449f3fb3b29c0c6380" Feb 18 00:45:49 crc kubenswrapper[4791]: E0218 00:45:49.724352 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"30985d2c2ef7798a054a00074126dfc336988e91f8bb27449f3fb3b29c0c6380\": container with ID starting with 30985d2c2ef7798a054a00074126dfc336988e91f8bb27449f3fb3b29c0c6380 not found: ID does not exist" containerID="30985d2c2ef7798a054a00074126dfc336988e91f8bb27449f3fb3b29c0c6380" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.724392 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"30985d2c2ef7798a054a00074126dfc336988e91f8bb27449f3fb3b29c0c6380"} err="failed to get container status \"30985d2c2ef7798a054a00074126dfc336988e91f8bb27449f3fb3b29c0c6380\": rpc error: code = NotFound desc = could not find container \"30985d2c2ef7798a054a00074126dfc336988e91f8bb27449f3fb3b29c0c6380\": container with ID starting 
with 30985d2c2ef7798a054a00074126dfc336988e91f8bb27449f3fb3b29c0c6380 not found: ID does not exist" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.724419 4791 scope.go:117] "RemoveContainer" containerID="5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea" Feb 18 00:45:49 crc kubenswrapper[4791]: E0218 00:45:49.724718 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea\": container with ID starting with 5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea not found: ID does not exist" containerID="5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.724738 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea"} err="failed to get container status \"5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea\": rpc error: code = NotFound desc = could not find container \"5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea\": container with ID starting with 5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea not found: ID does not exist" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.724754 4791 scope.go:117] "RemoveContainer" containerID="097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee" Feb 18 00:45:49 crc kubenswrapper[4791]: E0218 00:45:49.724952 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee\": container with ID starting with 097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee not found: ID does not exist" containerID="097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.724972 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee"} err="failed to get container status \"097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee\": rpc error: code = NotFound desc = could not find container \"097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee\": container with ID starting with 097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee not found: ID does not exist" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.724986 4791 scope.go:117] "RemoveContainer" containerID="31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047" Feb 18 00:45:49 crc kubenswrapper[4791]: E0218 00:45:49.725229 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047\": container with ID starting with 31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047 not found: ID does not exist" containerID="31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.725249 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047"} err="failed to get container status \"31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047\": 
rpc error: code = NotFound desc = could not find container \"31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047\": container with ID starting with 31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047 not found: ID does not exist" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.725262 4791 scope.go:117] "RemoveContainer" containerID="3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a" Feb 18 00:45:49 crc kubenswrapper[4791]: E0218 00:45:49.725482 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a\": container with ID starting with 3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a not found: ID does not exist" containerID="3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.725501 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a"} err="failed to get container status \"3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a\": rpc error: code = NotFound desc = could not find container \"3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a\": container with ID starting with 3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a not found: ID does not exist" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.725514 4791 scope.go:117] "RemoveContainer" containerID="73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9" Feb 18 00:45:49 crc kubenswrapper[4791]: E0218 00:45:49.725780 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9\": container with ID starting with 73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9 not found: ID does not exist" containerID="73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.725799 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9"} err="failed to get container status \"73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9\": rpc error: code = NotFound desc = could not find container \"73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9\": container with ID starting with 73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9 not found: ID does not exist" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.725822 4791 scope.go:117] "RemoveContainer" containerID="73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269" Feb 18 00:45:49 crc kubenswrapper[4791]: E0218 00:45:49.726025 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269\": container with ID starting with 73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269 not found: ID does not exist" containerID="73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.726045 4791 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269"} err="failed to get container status \"73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269\": rpc error: code = NotFound desc = could not find container \"73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269\": container with ID starting with 73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269 not found: ID does not exist" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.726057 4791 scope.go:117] "RemoveContainer" containerID="4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c" Feb 18 00:45:49 crc kubenswrapper[4791]: E0218 00:45:49.726325 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c\": container with ID starting with 4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c not found: ID does not exist" containerID="4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.726347 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c"} err="failed to get container status \"4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c\": rpc error: code = NotFound desc = could not find container \"4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c\": container with ID starting with 4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c not found: ID does not exist" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.726360 4791 scope.go:117] "RemoveContainer" containerID="4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357" Feb 18 00:45:49 crc kubenswrapper[4791]: E0218 00:45:49.726611 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\": container with ID starting with 4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357 not found: ID does not exist" containerID="4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.726632 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357"} err="failed to get container status \"4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\": rpc error: code = NotFound desc = could not find container \"4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\": container with ID starting with 4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357 not found: ID does not exist" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.726646 4791 scope.go:117] "RemoveContainer" containerID="30985d2c2ef7798a054a00074126dfc336988e91f8bb27449f3fb3b29c0c6380" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.726849 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"30985d2c2ef7798a054a00074126dfc336988e91f8bb27449f3fb3b29c0c6380"} err="failed to get container status \"30985d2c2ef7798a054a00074126dfc336988e91f8bb27449f3fb3b29c0c6380\": rpc error: code = NotFound desc = could not find container 
\"30985d2c2ef7798a054a00074126dfc336988e91f8bb27449f3fb3b29c0c6380\": container with ID starting with 30985d2c2ef7798a054a00074126dfc336988e91f8bb27449f3fb3b29c0c6380 not found: ID does not exist" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.726866 4791 scope.go:117] "RemoveContainer" containerID="5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.727083 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea"} err="failed to get container status \"5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea\": rpc error: code = NotFound desc = could not find container \"5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea\": container with ID starting with 5dd54689c29bb9b76bd30fed5f9207a3e4851ae28f2c3c981b4eb5f8f990ebea not found: ID does not exist" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.727101 4791 scope.go:117] "RemoveContainer" containerID="097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.727304 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee"} err="failed to get container status \"097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee\": rpc error: code = NotFound desc = could not find container \"097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee\": container with ID starting with 097f356d16c2a41f390ae6ca519cd3a37c5d4c6b6153e1aeaab29c443d2273ee not found: ID does not exist" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.727321 4791 scope.go:117] "RemoveContainer" containerID="31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.727682 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047"} err="failed to get container status \"31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047\": rpc error: code = NotFound desc = could not find container \"31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047\": container with ID starting with 31489adbb260da0125b689db67507ef413de458ab115c3f09c26fbf9eacae047 not found: ID does not exist" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.727700 4791 scope.go:117] "RemoveContainer" containerID="3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.727953 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a"} err="failed to get container status \"3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a\": rpc error: code = NotFound desc = could not find container \"3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a\": container with ID starting with 3aec58c933f7ea7ee22b975dbd9dfc2f2d81d2304a77bdb8d86b05346a6a6e9a not found: ID does not exist" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.727969 4791 scope.go:117] "RemoveContainer" containerID="73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.728147 4791 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9"} err="failed to get container status \"73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9\": rpc error: code = NotFound desc = could not find container \"73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9\": container with ID starting with 73a568d6d44feff9257f3468877ee437799ef32490351166e407230c72bcf4a9 not found: ID does not exist" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.728177 4791 scope.go:117] "RemoveContainer" containerID="73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.728382 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269"} err="failed to get container status \"73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269\": rpc error: code = NotFound desc = could not find container \"73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269\": container with ID starting with 73662d5b7e783cc4738a7b45986256323e343132a9c385a1ce434a6a15644269 not found: ID does not exist" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.728398 4791 scope.go:117] "RemoveContainer" containerID="4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.728608 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c"} err="failed to get container status \"4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c\": rpc error: code = NotFound desc = could not find container \"4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c\": container with ID starting with 4149d71b8ba3d0b6d653d9193ccae40ebcccb025a7c5e236d8e840ff4052a42c not found: ID does not exist" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.728624 4791 scope.go:117] "RemoveContainer" containerID="4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357" Feb 18 00:45:49 crc kubenswrapper[4791]: I0218 00:45:49.728849 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357"} err="failed to get container status \"4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\": rpc error: code = NotFound desc = could not find container \"4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357\": container with ID starting with 4afab874811bf3ef3cee66593edb46b404687a636a854675e31c9e690dbb5357 not found: ID does not exist" Feb 18 00:45:50 crc kubenswrapper[4791]: I0218 00:45:50.495468 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" event={"ID":"94a945c7-1e4c-4b9b-a4d9-627d888e59c9","Type":"ContainerStarted","Data":"52775381e4b39ec5cff5272451e1ba54323768829a877cf40bc34a6d8164e41f"} Feb 18 00:45:50 crc kubenswrapper[4791]: I0218 00:45:50.495805 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" event={"ID":"94a945c7-1e4c-4b9b-a4d9-627d888e59c9","Type":"ContainerStarted","Data":"7b1c1761a9d780a78b27b90e4184f78b06a888e9c80d7929ac8fa935ff81cb56"} Feb 18 00:45:50 crc kubenswrapper[4791]: I0218 00:45:50.495820 4791 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" event={"ID":"94a945c7-1e4c-4b9b-a4d9-627d888e59c9","Type":"ContainerStarted","Data":"8ff572ef3851ba431b4cf591024392b0a1d3229f7d360416feb7edb9d9c74e9d"} Feb 18 00:45:50 crc kubenswrapper[4791]: I0218 00:45:50.495831 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" event={"ID":"94a945c7-1e4c-4b9b-a4d9-627d888e59c9","Type":"ContainerStarted","Data":"3d33ce1175467c4d024419358ef278f79feb4f6aa658816da37eabdeb0807b3d"} Feb 18 00:45:50 crc kubenswrapper[4791]: I0218 00:45:50.495841 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" event={"ID":"94a945c7-1e4c-4b9b-a4d9-627d888e59c9","Type":"ContainerStarted","Data":"91380f224d7d570ae566e6d4c3883c061a17cb6c00fd1975b7ce8e257fd6898b"} Feb 18 00:45:51 crc kubenswrapper[4791]: I0218 00:45:51.068302 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1" path="/var/lib/kubelet/pods/3e8bbea5-4dc2-4c12-ad0e-eb3be03726e1/volumes" Feb 18 00:45:51 crc kubenswrapper[4791]: I0218 00:45:51.501940 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" event={"ID":"94a945c7-1e4c-4b9b-a4d9-627d888e59c9","Type":"ContainerStarted","Data":"49c3d65a61a248d02a811eebae04d932c70a54377eb073527319133801ea6526"} Feb 18 00:45:53 crc kubenswrapper[4791]: I0218 00:45:53.515918 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" event={"ID":"94a945c7-1e4c-4b9b-a4d9-627d888e59c9","Type":"ContainerStarted","Data":"07c1ee4087d175b7c7fd1f5c094a50266eb687b9242d50ffb9231a616be73946"} Feb 18 00:45:53 crc kubenswrapper[4791]: I0218 00:45:53.700487 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-tqvq6"] Feb 18 00:45:53 crc kubenswrapper[4791]: I0218 00:45:53.701504 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-tqvq6" Feb 18 00:45:53 crc kubenswrapper[4791]: I0218 00:45:53.703372 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-bjmpr" Feb 18 00:45:53 crc kubenswrapper[4791]: I0218 00:45:53.703394 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt" Feb 18 00:45:53 crc kubenswrapper[4791]: I0218 00:45:53.705362 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Feb 18 00:45:53 crc kubenswrapper[4791]: I0218 00:45:53.816083 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j"] Feb 18 00:45:53 crc kubenswrapper[4791]: I0218 00:45:53.817361 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j" Feb 18 00:45:53 crc kubenswrapper[4791]: I0218 00:45:53.819680 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-wtwnj" Feb 18 00:45:53 crc kubenswrapper[4791]: I0218 00:45:53.820256 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Feb 18 00:45:53 crc kubenswrapper[4791]: I0218 00:45:53.823525 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx"] Feb 18 00:45:53 crc kubenswrapper[4791]: I0218 00:45:53.824184 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx" Feb 18 00:45:53 crc kubenswrapper[4791]: I0218 00:45:53.865238 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-txfjv\" (UniqueName: \"kubernetes.io/projected/9179defa-6414-46af-857c-9459169745e5-kube-api-access-txfjv\") pod \"obo-prometheus-operator-68bc856cb9-tqvq6\" (UID: \"9179defa-6414-46af-857c-9459169745e5\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-tqvq6" Feb 18 00:45:53 crc kubenswrapper[4791]: I0218 00:45:53.966623 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-txfjv\" (UniqueName: \"kubernetes.io/projected/9179defa-6414-46af-857c-9459169745e5-kube-api-access-txfjv\") pod \"obo-prometheus-operator-68bc856cb9-tqvq6\" (UID: \"9179defa-6414-46af-857c-9459169745e5\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-tqvq6" Feb 18 00:45:53 crc kubenswrapper[4791]: I0218 00:45:53.966695 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/01b8edc9-ae6f-42d9-8f91-d7918bb3959f-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j\" (UID: \"01b8edc9-ae6f-42d9-8f91-d7918bb3959f\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j" Feb 18 00:45:53 crc kubenswrapper[4791]: I0218 00:45:53.966764 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2d01089e-d6f0-4bfb-8ad1-5269e8a912fa-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx\" (UID: \"2d01089e-d6f0-4bfb-8ad1-5269e8a912fa\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx" Feb 18 00:45:53 crc kubenswrapper[4791]: I0218 00:45:53.966793 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2d01089e-d6f0-4bfb-8ad1-5269e8a912fa-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx\" (UID: \"2d01089e-d6f0-4bfb-8ad1-5269e8a912fa\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx" Feb 18 00:45:53 crc kubenswrapper[4791]: I0218 00:45:53.966832 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/01b8edc9-ae6f-42d9-8f91-d7918bb3959f-webhook-cert\") pod 
\"obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j\" (UID: \"01b8edc9-ae6f-42d9-8f91-d7918bb3959f\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j" Feb 18 00:45:53 crc kubenswrapper[4791]: I0218 00:45:53.987124 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-txfjv\" (UniqueName: \"kubernetes.io/projected/9179defa-6414-46af-857c-9459169745e5-kube-api-access-txfjv\") pod \"obo-prometheus-operator-68bc856cb9-tqvq6\" (UID: \"9179defa-6414-46af-857c-9459169745e5\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-tqvq6" Feb 18 00:45:54 crc kubenswrapper[4791]: I0218 00:45:54.019328 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-tdc8p"] Feb 18 00:45:54 crc kubenswrapper[4791]: I0218 00:45:54.020886 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-tdc8p" Feb 18 00:45:54 crc kubenswrapper[4791]: I0218 00:45:54.023106 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-tqvq6" Feb 18 00:45:54 crc kubenswrapper[4791]: I0218 00:45:54.023187 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-9nl5x" Feb 18 00:45:54 crc kubenswrapper[4791]: I0218 00:45:54.026910 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Feb 18 00:45:54 crc kubenswrapper[4791]: E0218 00:45:54.061187 4791 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-tqvq6_openshift-operators_9179defa-6414-46af-857c-9459169745e5_0(24474ab2eab03cfe8701ea85cba1c03f6fa02d2b3f293aba03433ef768730bdf): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 18 00:45:54 crc kubenswrapper[4791]: E0218 00:45:54.061265 4791 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-tqvq6_openshift-operators_9179defa-6414-46af-857c-9459169745e5_0(24474ab2eab03cfe8701ea85cba1c03f6fa02d2b3f293aba03433ef768730bdf): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-tqvq6" Feb 18 00:45:54 crc kubenswrapper[4791]: E0218 00:45:54.061292 4791 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-tqvq6_openshift-operators_9179defa-6414-46af-857c-9459169745e5_0(24474ab2eab03cfe8701ea85cba1c03f6fa02d2b3f293aba03433ef768730bdf): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-68bc856cb9-tqvq6" Feb 18 00:45:54 crc kubenswrapper[4791]: E0218 00:45:54.061350 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-68bc856cb9-tqvq6_openshift-operators(9179defa-6414-46af-857c-9459169745e5)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-68bc856cb9-tqvq6_openshift-operators(9179defa-6414-46af-857c-9459169745e5)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-tqvq6_openshift-operators_9179defa-6414-46af-857c-9459169745e5_0(24474ab2eab03cfe8701ea85cba1c03f6fa02d2b3f293aba03433ef768730bdf): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-tqvq6" podUID="9179defa-6414-46af-857c-9459169745e5" Feb 18 00:45:54 crc kubenswrapper[4791]: I0218 00:45:54.067786 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/01b8edc9-ae6f-42d9-8f91-d7918bb3959f-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j\" (UID: \"01b8edc9-ae6f-42d9-8f91-d7918bb3959f\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j" Feb 18 00:45:54 crc kubenswrapper[4791]: I0218 00:45:54.067859 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2d01089e-d6f0-4bfb-8ad1-5269e8a912fa-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx\" (UID: \"2d01089e-d6f0-4bfb-8ad1-5269e8a912fa\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx" Feb 18 00:45:54 crc kubenswrapper[4791]: I0218 00:45:54.067880 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2d01089e-d6f0-4bfb-8ad1-5269e8a912fa-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx\" (UID: \"2d01089e-d6f0-4bfb-8ad1-5269e8a912fa\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx" Feb 18 00:45:54 crc kubenswrapper[4791]: I0218 00:45:54.067913 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/01b8edc9-ae6f-42d9-8f91-d7918bb3959f-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j\" (UID: \"01b8edc9-ae6f-42d9-8f91-d7918bb3959f\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j" Feb 18 00:45:54 crc kubenswrapper[4791]: I0218 00:45:54.070879 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/01b8edc9-ae6f-42d9-8f91-d7918bb3959f-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j\" (UID: \"01b8edc9-ae6f-42d9-8f91-d7918bb3959f\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j" Feb 18 00:45:54 crc kubenswrapper[4791]: I0218 00:45:54.070912 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/01b8edc9-ae6f-42d9-8f91-d7918bb3959f-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j\" (UID: 
\"01b8edc9-ae6f-42d9-8f91-d7918bb3959f\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j" Feb 18 00:45:54 crc kubenswrapper[4791]: I0218 00:45:54.071491 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2d01089e-d6f0-4bfb-8ad1-5269e8a912fa-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx\" (UID: \"2d01089e-d6f0-4bfb-8ad1-5269e8a912fa\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx" Feb 18 00:45:54 crc kubenswrapper[4791]: I0218 00:45:54.072594 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2d01089e-d6f0-4bfb-8ad1-5269e8a912fa-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx\" (UID: \"2d01089e-d6f0-4bfb-8ad1-5269e8a912fa\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx" Feb 18 00:45:54 crc kubenswrapper[4791]: I0218 00:45:54.137038 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j" Feb 18 00:45:54 crc kubenswrapper[4791]: I0218 00:45:54.146764 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx" Feb 18 00:45:54 crc kubenswrapper[4791]: I0218 00:45:54.169219 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/503c2fbe-9550-4943-bdf7-ce6372a20de2-observability-operator-tls\") pod \"observability-operator-59bdc8b94-tdc8p\" (UID: \"503c2fbe-9550-4943-bdf7-ce6372a20de2\") " pod="openshift-operators/observability-operator-59bdc8b94-tdc8p" Feb 18 00:45:54 crc kubenswrapper[4791]: I0218 00:45:54.169273 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8kj7k\" (UniqueName: \"kubernetes.io/projected/503c2fbe-9550-4943-bdf7-ce6372a20de2-kube-api-access-8kj7k\") pod \"observability-operator-59bdc8b94-tdc8p\" (UID: \"503c2fbe-9550-4943-bdf7-ce6372a20de2\") " pod="openshift-operators/observability-operator-59bdc8b94-tdc8p" Feb 18 00:45:54 crc kubenswrapper[4791]: E0218 00:45:54.171969 4791 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j_openshift-operators_01b8edc9-ae6f-42d9-8f91-d7918bb3959f_0(915d1b89b80ffca0043522d0749adc589c6167d5fa9608698d7192d935cebbe0): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 18 00:45:54 crc kubenswrapper[4791]: E0218 00:45:54.172032 4791 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j_openshift-operators_01b8edc9-ae6f-42d9-8f91-d7918bb3959f_0(915d1b89b80ffca0043522d0749adc589c6167d5fa9608698d7192d935cebbe0): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j" Feb 18 00:45:54 crc kubenswrapper[4791]: E0218 00:45:54.172057 4791 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j_openshift-operators_01b8edc9-ae6f-42d9-8f91-d7918bb3959f_0(915d1b89b80ffca0043522d0749adc589c6167d5fa9608698d7192d935cebbe0): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j" Feb 18 00:45:54 crc kubenswrapper[4791]: E0218 00:45:54.172107 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j_openshift-operators(01b8edc9-ae6f-42d9-8f91-d7918bb3959f)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j_openshift-operators(01b8edc9-ae6f-42d9-8f91-d7918bb3959f)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j_openshift-operators_01b8edc9-ae6f-42d9-8f91-d7918bb3959f_0(915d1b89b80ffca0043522d0749adc589c6167d5fa9608698d7192d935cebbe0): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j" podUID="01b8edc9-ae6f-42d9-8f91-d7918bb3959f" Feb 18 00:45:54 crc kubenswrapper[4791]: E0218 00:45:54.186860 4791 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx_openshift-operators_2d01089e-d6f0-4bfb-8ad1-5269e8a912fa_0(f8582ed4abcb955df45b0f768cac8aac6a341bf53807b4575a97e8c886fc12b3): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 18 00:45:54 crc kubenswrapper[4791]: E0218 00:45:54.186934 4791 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx_openshift-operators_2d01089e-d6f0-4bfb-8ad1-5269e8a912fa_0(f8582ed4abcb955df45b0f768cac8aac6a341bf53807b4575a97e8c886fc12b3): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx" Feb 18 00:45:54 crc kubenswrapper[4791]: E0218 00:45:54.186957 4791 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx_openshift-operators_2d01089e-d6f0-4bfb-8ad1-5269e8a912fa_0(f8582ed4abcb955df45b0f768cac8aac6a341bf53807b4575a97e8c886fc12b3): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx" Feb 18 00:45:54 crc kubenswrapper[4791]: E0218 00:45:54.187001 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx_openshift-operators(2d01089e-d6f0-4bfb-8ad1-5269e8a912fa)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx_openshift-operators(2d01089e-d6f0-4bfb-8ad1-5269e8a912fa)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx_openshift-operators_2d01089e-d6f0-4bfb-8ad1-5269e8a912fa_0(f8582ed4abcb955df45b0f768cac8aac6a341bf53807b4575a97e8c886fc12b3): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx" podUID="2d01089e-d6f0-4bfb-8ad1-5269e8a912fa" Feb 18 00:45:54 crc kubenswrapper[4791]: I0218 00:45:54.214188 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-dp9pl"] Feb 18 00:45:54 crc kubenswrapper[4791]: I0218 00:45:54.214949 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-dp9pl" Feb 18 00:45:54 crc kubenswrapper[4791]: I0218 00:45:54.218916 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-mfv5q" Feb 18 00:45:54 crc kubenswrapper[4791]: I0218 00:45:54.271104 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/503c2fbe-9550-4943-bdf7-ce6372a20de2-observability-operator-tls\") pod \"observability-operator-59bdc8b94-tdc8p\" (UID: \"503c2fbe-9550-4943-bdf7-ce6372a20de2\") " pod="openshift-operators/observability-operator-59bdc8b94-tdc8p" Feb 18 00:45:54 crc kubenswrapper[4791]: I0218 00:45:54.271187 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8kj7k\" (UniqueName: \"kubernetes.io/projected/503c2fbe-9550-4943-bdf7-ce6372a20de2-kube-api-access-8kj7k\") pod \"observability-operator-59bdc8b94-tdc8p\" (UID: \"503c2fbe-9550-4943-bdf7-ce6372a20de2\") " pod="openshift-operators/observability-operator-59bdc8b94-tdc8p" Feb 18 00:45:54 crc kubenswrapper[4791]: I0218 00:45:54.278886 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/503c2fbe-9550-4943-bdf7-ce6372a20de2-observability-operator-tls\") pod \"observability-operator-59bdc8b94-tdc8p\" (UID: \"503c2fbe-9550-4943-bdf7-ce6372a20de2\") " pod="openshift-operators/observability-operator-59bdc8b94-tdc8p" Feb 18 00:45:54 crc kubenswrapper[4791]: I0218 00:45:54.300018 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8kj7k\" (UniqueName: \"kubernetes.io/projected/503c2fbe-9550-4943-bdf7-ce6372a20de2-kube-api-access-8kj7k\") pod \"observability-operator-59bdc8b94-tdc8p\" (UID: \"503c2fbe-9550-4943-bdf7-ce6372a20de2\") " pod="openshift-operators/observability-operator-59bdc8b94-tdc8p" Feb 18 00:45:54 crc kubenswrapper[4791]: I0218 00:45:54.336185 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-tdc8p" Feb 18 00:45:54 crc kubenswrapper[4791]: E0218 00:45:54.358935 4791 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-tdc8p_openshift-operators_503c2fbe-9550-4943-bdf7-ce6372a20de2_0(aba975b14ac1c608585e1cf1fdf468e8ffb2d0d2d93aaec91faa3a2576a5b6e6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 18 00:45:54 crc kubenswrapper[4791]: E0218 00:45:54.359007 4791 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-tdc8p_openshift-operators_503c2fbe-9550-4943-bdf7-ce6372a20de2_0(aba975b14ac1c608585e1cf1fdf468e8ffb2d0d2d93aaec91faa3a2576a5b6e6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-59bdc8b94-tdc8p" Feb 18 00:45:54 crc kubenswrapper[4791]: E0218 00:45:54.359039 4791 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-tdc8p_openshift-operators_503c2fbe-9550-4943-bdf7-ce6372a20de2_0(aba975b14ac1c608585e1cf1fdf468e8ffb2d0d2d93aaec91faa3a2576a5b6e6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-59bdc8b94-tdc8p" Feb 18 00:45:54 crc kubenswrapper[4791]: E0218 00:45:54.359111 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-59bdc8b94-tdc8p_openshift-operators(503c2fbe-9550-4943-bdf7-ce6372a20de2)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-59bdc8b94-tdc8p_openshift-operators(503c2fbe-9550-4943-bdf7-ce6372a20de2)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-tdc8p_openshift-operators_503c2fbe-9550-4943-bdf7-ce6372a20de2_0(aba975b14ac1c608585e1cf1fdf468e8ffb2d0d2d93aaec91faa3a2576a5b6e6): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/observability-operator-59bdc8b94-tdc8p" podUID="503c2fbe-9550-4943-bdf7-ce6372a20de2" Feb 18 00:45:54 crc kubenswrapper[4791]: I0218 00:45:54.372634 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/d77ea4ed-d272-4d64-83a8-b849f88861d1-openshift-service-ca\") pod \"perses-operator-5bf474d74f-dp9pl\" (UID: \"d77ea4ed-d272-4d64-83a8-b849f88861d1\") " pod="openshift-operators/perses-operator-5bf474d74f-dp9pl" Feb 18 00:45:54 crc kubenswrapper[4791]: I0218 00:45:54.373077 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m82r9\" (UniqueName: \"kubernetes.io/projected/d77ea4ed-d272-4d64-83a8-b849f88861d1-kube-api-access-m82r9\") pod \"perses-operator-5bf474d74f-dp9pl\" (UID: \"d77ea4ed-d272-4d64-83a8-b849f88861d1\") " pod="openshift-operators/perses-operator-5bf474d74f-dp9pl" Feb 18 00:45:54 crc kubenswrapper[4791]: I0218 00:45:54.474827 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/d77ea4ed-d272-4d64-83a8-b849f88861d1-openshift-service-ca\") pod \"perses-operator-5bf474d74f-dp9pl\" (UID: \"d77ea4ed-d272-4d64-83a8-b849f88861d1\") " pod="openshift-operators/perses-operator-5bf474d74f-dp9pl" Feb 18 00:45:54 crc kubenswrapper[4791]: I0218 00:45:54.475221 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m82r9\" (UniqueName: \"kubernetes.io/projected/d77ea4ed-d272-4d64-83a8-b849f88861d1-kube-api-access-m82r9\") pod \"perses-operator-5bf474d74f-dp9pl\" (UID: \"d77ea4ed-d272-4d64-83a8-b849f88861d1\") " pod="openshift-operators/perses-operator-5bf474d74f-dp9pl" Feb 18 00:45:54 crc kubenswrapper[4791]: I0218 00:45:54.475720 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/d77ea4ed-d272-4d64-83a8-b849f88861d1-openshift-service-ca\") pod \"perses-operator-5bf474d74f-dp9pl\" (UID: \"d77ea4ed-d272-4d64-83a8-b849f88861d1\") " pod="openshift-operators/perses-operator-5bf474d74f-dp9pl" Feb 18 00:45:54 crc kubenswrapper[4791]: I0218 00:45:54.494565 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m82r9\" (UniqueName: \"kubernetes.io/projected/d77ea4ed-d272-4d64-83a8-b849f88861d1-kube-api-access-m82r9\") pod \"perses-operator-5bf474d74f-dp9pl\" (UID: \"d77ea4ed-d272-4d64-83a8-b849f88861d1\") " pod="openshift-operators/perses-operator-5bf474d74f-dp9pl" Feb 18 00:45:54 crc kubenswrapper[4791]: I0218 00:45:54.536112 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-dp9pl" Feb 18 00:45:54 crc kubenswrapper[4791]: E0218 00:45:54.560759 4791 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-dp9pl_openshift-operators_d77ea4ed-d272-4d64-83a8-b849f88861d1_0(50bbc5a3bdf66c3acd1ba4b96790ae08cf3457819a93cd94a12dc0e35309be4b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Feb 18 00:45:54 crc kubenswrapper[4791]: E0218 00:45:54.560829 4791 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-dp9pl_openshift-operators_d77ea4ed-d272-4d64-83a8-b849f88861d1_0(50bbc5a3bdf66c3acd1ba4b96790ae08cf3457819a93cd94a12dc0e35309be4b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5bf474d74f-dp9pl" Feb 18 00:45:54 crc kubenswrapper[4791]: E0218 00:45:54.560851 4791 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-dp9pl_openshift-operators_d77ea4ed-d272-4d64-83a8-b849f88861d1_0(50bbc5a3bdf66c3acd1ba4b96790ae08cf3457819a93cd94a12dc0e35309be4b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5bf474d74f-dp9pl" Feb 18 00:45:54 crc kubenswrapper[4791]: E0218 00:45:54.560896 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5bf474d74f-dp9pl_openshift-operators(d77ea4ed-d272-4d64-83a8-b849f88861d1)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5bf474d74f-dp9pl_openshift-operators(d77ea4ed-d272-4d64-83a8-b849f88861d1)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-dp9pl_openshift-operators_d77ea4ed-d272-4d64-83a8-b849f88861d1_0(50bbc5a3bdf66c3acd1ba4b96790ae08cf3457819a93cd94a12dc0e35309be4b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/perses-operator-5bf474d74f-dp9pl" podUID="d77ea4ed-d272-4d64-83a8-b849f88861d1" Feb 18 00:45:55 crc kubenswrapper[4791]: I0218 00:45:55.532508 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" event={"ID":"94a945c7-1e4c-4b9b-a4d9-627d888e59c9","Type":"ContainerStarted","Data":"cb738ce2f40e6fe4f77ae9e6b47aa37987a4b05ee032e0c7aa80e50a2044eb50"} Feb 18 00:45:55 crc kubenswrapper[4791]: I0218 00:45:55.532931 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:55 crc kubenswrapper[4791]: I0218 00:45:55.532985 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:55 crc kubenswrapper[4791]: I0218 00:45:55.532997 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:55 crc kubenswrapper[4791]: I0218 00:45:55.564885 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:55 crc kubenswrapper[4791]: I0218 00:45:55.565401 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:45:55 crc kubenswrapper[4791]: I0218 00:45:55.571698 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" podStartSLOduration=7.571676537 podStartE2EDuration="7.571676537s" podCreationTimestamp="2026-02-18 00:45:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2026-02-18 00:45:55.564474936 +0000 UTC m=+697.132488106" watchObservedRunningTime="2026-02-18 00:45:55.571676537 +0000 UTC m=+697.139689707" Feb 18 00:45:55 crc kubenswrapper[4791]: I0218 00:45:55.813943 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-tdc8p"] Feb 18 00:45:55 crc kubenswrapper[4791]: I0218 00:45:55.814079 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-tdc8p" Feb 18 00:45:55 crc kubenswrapper[4791]: I0218 00:45:55.814596 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-tdc8p" Feb 18 00:45:55 crc kubenswrapper[4791]: E0218 00:45:55.840096 4791 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-tdc8p_openshift-operators_503c2fbe-9550-4943-bdf7-ce6372a20de2_0(70e1fefcde4f9570d8e2d2d4f6fd5bdde0d373a6d71117264343c9fd64ae903c): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 18 00:45:55 crc kubenswrapper[4791]: E0218 00:45:55.840203 4791 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-tdc8p_openshift-operators_503c2fbe-9550-4943-bdf7-ce6372a20de2_0(70e1fefcde4f9570d8e2d2d4f6fd5bdde0d373a6d71117264343c9fd64ae903c): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-59bdc8b94-tdc8p" Feb 18 00:45:55 crc kubenswrapper[4791]: E0218 00:45:55.840233 4791 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-tdc8p_openshift-operators_503c2fbe-9550-4943-bdf7-ce6372a20de2_0(70e1fefcde4f9570d8e2d2d4f6fd5bdde0d373a6d71117264343c9fd64ae903c): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-59bdc8b94-tdc8p" Feb 18 00:45:55 crc kubenswrapper[4791]: E0218 00:45:55.840297 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-59bdc8b94-tdc8p_openshift-operators(503c2fbe-9550-4943-bdf7-ce6372a20de2)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-59bdc8b94-tdc8p_openshift-operators(503c2fbe-9550-4943-bdf7-ce6372a20de2)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-tdc8p_openshift-operators_503c2fbe-9550-4943-bdf7-ce6372a20de2_0(70e1fefcde4f9570d8e2d2d4f6fd5bdde0d373a6d71117264343c9fd64ae903c): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/observability-operator-59bdc8b94-tdc8p" podUID="503c2fbe-9550-4943-bdf7-ce6372a20de2" Feb 18 00:45:55 crc kubenswrapper[4791]: I0218 00:45:55.869114 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx"] Feb 18 00:45:55 crc kubenswrapper[4791]: I0218 00:45:55.869236 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx" Feb 18 00:45:55 crc kubenswrapper[4791]: I0218 00:45:55.869606 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx" Feb 18 00:45:55 crc kubenswrapper[4791]: E0218 00:45:55.914579 4791 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx_openshift-operators_2d01089e-d6f0-4bfb-8ad1-5269e8a912fa_0(ef678603209536b799f01baf6d540fffa15aeb45bef2eb716b8cc9510ec2e2dd): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 18 00:45:55 crc kubenswrapper[4791]: E0218 00:45:55.914642 4791 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx_openshift-operators_2d01089e-d6f0-4bfb-8ad1-5269e8a912fa_0(ef678603209536b799f01baf6d540fffa15aeb45bef2eb716b8cc9510ec2e2dd): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx" Feb 18 00:45:55 crc kubenswrapper[4791]: E0218 00:45:55.914663 4791 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx_openshift-operators_2d01089e-d6f0-4bfb-8ad1-5269e8a912fa_0(ef678603209536b799f01baf6d540fffa15aeb45bef2eb716b8cc9510ec2e2dd): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx" Feb 18 00:45:55 crc kubenswrapper[4791]: E0218 00:45:55.914712 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx_openshift-operators(2d01089e-d6f0-4bfb-8ad1-5269e8a912fa)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx_openshift-operators(2d01089e-d6f0-4bfb-8ad1-5269e8a912fa)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx_openshift-operators_2d01089e-d6f0-4bfb-8ad1-5269e8a912fa_0(ef678603209536b799f01baf6d540fffa15aeb45bef2eb716b8cc9510ec2e2dd): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx" podUID="2d01089e-d6f0-4bfb-8ad1-5269e8a912fa" Feb 18 00:45:55 crc kubenswrapper[4791]: I0218 00:45:55.916651 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-dp9pl"] Feb 18 00:45:55 crc kubenswrapper[4791]: I0218 00:45:55.916769 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-dp9pl" Feb 18 00:45:55 crc kubenswrapper[4791]: I0218 00:45:55.917199 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-dp9pl" Feb 18 00:45:55 crc kubenswrapper[4791]: I0218 00:45:55.929486 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-tqvq6"] Feb 18 00:45:55 crc kubenswrapper[4791]: I0218 00:45:55.929594 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-tqvq6" Feb 18 00:45:55 crc kubenswrapper[4791]: I0218 00:45:55.930019 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-tqvq6" Feb 18 00:45:55 crc kubenswrapper[4791]: E0218 00:45:55.940429 4791 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-dp9pl_openshift-operators_d77ea4ed-d272-4d64-83a8-b849f88861d1_0(4fa2205183849b6ec07ca1932ae424e412247dd4caa931d7363367e9e46d843b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 18 00:45:55 crc kubenswrapper[4791]: E0218 00:45:55.940489 4791 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-dp9pl_openshift-operators_d77ea4ed-d272-4d64-83a8-b849f88861d1_0(4fa2205183849b6ec07ca1932ae424e412247dd4caa931d7363367e9e46d843b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5bf474d74f-dp9pl" Feb 18 00:45:55 crc kubenswrapper[4791]: E0218 00:45:55.940511 4791 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-dp9pl_openshift-operators_d77ea4ed-d272-4d64-83a8-b849f88861d1_0(4fa2205183849b6ec07ca1932ae424e412247dd4caa931d7363367e9e46d843b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5bf474d74f-dp9pl" Feb 18 00:45:55 crc kubenswrapper[4791]: E0218 00:45:55.940557 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5bf474d74f-dp9pl_openshift-operators(d77ea4ed-d272-4d64-83a8-b849f88861d1)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5bf474d74f-dp9pl_openshift-operators(d77ea4ed-d272-4d64-83a8-b849f88861d1)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-dp9pl_openshift-operators_d77ea4ed-d272-4d64-83a8-b849f88861d1_0(4fa2205183849b6ec07ca1932ae424e412247dd4caa931d7363367e9e46d843b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/perses-operator-5bf474d74f-dp9pl" podUID="d77ea4ed-d272-4d64-83a8-b849f88861d1" Feb 18 00:45:55 crc kubenswrapper[4791]: I0218 00:45:55.951255 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j"] Feb 18 00:45:55 crc kubenswrapper[4791]: I0218 00:45:55.951384 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j" Feb 18 00:45:55 crc kubenswrapper[4791]: I0218 00:45:55.951831 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j" Feb 18 00:45:55 crc kubenswrapper[4791]: E0218 00:45:55.972549 4791 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-tqvq6_openshift-operators_9179defa-6414-46af-857c-9459169745e5_0(77c4c9282b1c6f4a1ef85e389c47068c5965e5fedb99df0eedd5b638bbc3fb10): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 18 00:45:55 crc kubenswrapper[4791]: E0218 00:45:55.972610 4791 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-tqvq6_openshift-operators_9179defa-6414-46af-857c-9459169745e5_0(77c4c9282b1c6f4a1ef85e389c47068c5965e5fedb99df0eedd5b638bbc3fb10): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-tqvq6" Feb 18 00:45:55 crc kubenswrapper[4791]: E0218 00:45:55.972632 4791 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-tqvq6_openshift-operators_9179defa-6414-46af-857c-9459169745e5_0(77c4c9282b1c6f4a1ef85e389c47068c5965e5fedb99df0eedd5b638bbc3fb10): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-tqvq6" Feb 18 00:45:55 crc kubenswrapper[4791]: E0218 00:45:55.972676 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-68bc856cb9-tqvq6_openshift-operators(9179defa-6414-46af-857c-9459169745e5)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-68bc856cb9-tqvq6_openshift-operators(9179defa-6414-46af-857c-9459169745e5)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-tqvq6_openshift-operators_9179defa-6414-46af-857c-9459169745e5_0(77c4c9282b1c6f4a1ef85e389c47068c5965e5fedb99df0eedd5b638bbc3fb10): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-tqvq6" podUID="9179defa-6414-46af-857c-9459169745e5" Feb 18 00:45:55 crc kubenswrapper[4791]: E0218 00:45:55.984771 4791 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j_openshift-operators_01b8edc9-ae6f-42d9-8f91-d7918bb3959f_0(72b15e59f70a02b268a3c50727da28e0ccd51d615ca3def2d525950788caedb8): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Feb 18 00:45:55 crc kubenswrapper[4791]: E0218 00:45:55.984852 4791 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j_openshift-operators_01b8edc9-ae6f-42d9-8f91-d7918bb3959f_0(72b15e59f70a02b268a3c50727da28e0ccd51d615ca3def2d525950788caedb8): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j" Feb 18 00:45:55 crc kubenswrapper[4791]: E0218 00:45:55.984880 4791 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j_openshift-operators_01b8edc9-ae6f-42d9-8f91-d7918bb3959f_0(72b15e59f70a02b268a3c50727da28e0ccd51d615ca3def2d525950788caedb8): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j" Feb 18 00:45:55 crc kubenswrapper[4791]: E0218 00:45:55.984929 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j_openshift-operators(01b8edc9-ae6f-42d9-8f91-d7918bb3959f)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j_openshift-operators(01b8edc9-ae6f-42d9-8f91-d7918bb3959f)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j_openshift-operators_01b8edc9-ae6f-42d9-8f91-d7918bb3959f_0(72b15e59f70a02b268a3c50727da28e0ccd51d615ca3def2d525950788caedb8): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j" podUID="01b8edc9-ae6f-42d9-8f91-d7918bb3959f" Feb 18 00:46:00 crc kubenswrapper[4791]: I0218 00:46:00.061660 4791 scope.go:117] "RemoveContainer" containerID="fad5ddd370dd4141f0c92d01554dafc2561baba1cf4e086c3add33e0923068b0" Feb 18 00:46:00 crc kubenswrapper[4791]: E0218 00:46:00.062726 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-d2kpn_openshift-multus(83bdb769-59eb-4472-ba08-be5897ee2cd6)\"" pod="openshift-multus/multus-d2kpn" podUID="83bdb769-59eb-4472-ba08-be5897ee2cd6" Feb 18 00:46:07 crc kubenswrapper[4791]: I0218 00:46:07.061220 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-tdc8p" Feb 18 00:46:07 crc kubenswrapper[4791]: I0218 00:46:07.062104 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-tdc8p" Feb 18 00:46:07 crc kubenswrapper[4791]: E0218 00:46:07.089497 4791 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-tdc8p_openshift-operators_503c2fbe-9550-4943-bdf7-ce6372a20de2_0(13256dda5ac7cd659695166473e4ee3749a9b4eadda358fbdebe60871afef4e4): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Feb 18 00:46:07 crc kubenswrapper[4791]: E0218 00:46:07.089578 4791 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-tdc8p_openshift-operators_503c2fbe-9550-4943-bdf7-ce6372a20de2_0(13256dda5ac7cd659695166473e4ee3749a9b4eadda358fbdebe60871afef4e4): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-59bdc8b94-tdc8p" Feb 18 00:46:07 crc kubenswrapper[4791]: E0218 00:46:07.089624 4791 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-tdc8p_openshift-operators_503c2fbe-9550-4943-bdf7-ce6372a20de2_0(13256dda5ac7cd659695166473e4ee3749a9b4eadda358fbdebe60871afef4e4): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-59bdc8b94-tdc8p" Feb 18 00:46:07 crc kubenswrapper[4791]: E0218 00:46:07.089701 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-59bdc8b94-tdc8p_openshift-operators(503c2fbe-9550-4943-bdf7-ce6372a20de2)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-59bdc8b94-tdc8p_openshift-operators(503c2fbe-9550-4943-bdf7-ce6372a20de2)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-tdc8p_openshift-operators_503c2fbe-9550-4943-bdf7-ce6372a20de2_0(13256dda5ac7cd659695166473e4ee3749a9b4eadda358fbdebe60871afef4e4): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/observability-operator-59bdc8b94-tdc8p" podUID="503c2fbe-9550-4943-bdf7-ce6372a20de2" Feb 18 00:46:10 crc kubenswrapper[4791]: I0218 00:46:10.060714 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j" Feb 18 00:46:10 crc kubenswrapper[4791]: I0218 00:46:10.061440 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j" Feb 18 00:46:10 crc kubenswrapper[4791]: E0218 00:46:10.087138 4791 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j_openshift-operators_01b8edc9-ae6f-42d9-8f91-d7918bb3959f_0(e8470bdf530ceb083d5b84d0c18a8d753de94fbe0ccdf6b1ca05af4bfcf418a9): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 18 00:46:10 crc kubenswrapper[4791]: E0218 00:46:10.087210 4791 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j_openshift-operators_01b8edc9-ae6f-42d9-8f91-d7918bb3959f_0(e8470bdf530ceb083d5b84d0c18a8d753de94fbe0ccdf6b1ca05af4bfcf418a9): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j" Feb 18 00:46:10 crc kubenswrapper[4791]: E0218 00:46:10.087230 4791 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j_openshift-operators_01b8edc9-ae6f-42d9-8f91-d7918bb3959f_0(e8470bdf530ceb083d5b84d0c18a8d753de94fbe0ccdf6b1ca05af4bfcf418a9): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j" Feb 18 00:46:10 crc kubenswrapper[4791]: E0218 00:46:10.087274 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j_openshift-operators(01b8edc9-ae6f-42d9-8f91-d7918bb3959f)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j_openshift-operators(01b8edc9-ae6f-42d9-8f91-d7918bb3959f)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j_openshift-operators_01b8edc9-ae6f-42d9-8f91-d7918bb3959f_0(e8470bdf530ceb083d5b84d0c18a8d753de94fbe0ccdf6b1ca05af4bfcf418a9): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j" podUID="01b8edc9-ae6f-42d9-8f91-d7918bb3959f" Feb 18 00:46:11 crc kubenswrapper[4791]: I0218 00:46:11.060092 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-dp9pl" Feb 18 00:46:11 crc kubenswrapper[4791]: I0218 00:46:11.060206 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-tqvq6" Feb 18 00:46:11 crc kubenswrapper[4791]: I0218 00:46:11.060250 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx" Feb 18 00:46:11 crc kubenswrapper[4791]: I0218 00:46:11.061118 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx" Feb 18 00:46:11 crc kubenswrapper[4791]: I0218 00:46:11.061127 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-tqvq6" Feb 18 00:46:11 crc kubenswrapper[4791]: I0218 00:46:11.061710 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-dp9pl" Feb 18 00:46:11 crc kubenswrapper[4791]: E0218 00:46:11.122185 4791 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-tqvq6_openshift-operators_9179defa-6414-46af-857c-9459169745e5_0(e6bb4b29f2c6f67c260659998c19834fe8e58c23dec3143f131ea89968ae4818): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Feb 18 00:46:11 crc kubenswrapper[4791]: E0218 00:46:11.122265 4791 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-tqvq6_openshift-operators_9179defa-6414-46af-857c-9459169745e5_0(e6bb4b29f2c6f67c260659998c19834fe8e58c23dec3143f131ea89968ae4818): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-tqvq6" Feb 18 00:46:11 crc kubenswrapper[4791]: E0218 00:46:11.122290 4791 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-tqvq6_openshift-operators_9179defa-6414-46af-857c-9459169745e5_0(e6bb4b29f2c6f67c260659998c19834fe8e58c23dec3143f131ea89968ae4818): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-tqvq6" Feb 18 00:46:11 crc kubenswrapper[4791]: E0218 00:46:11.122338 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-68bc856cb9-tqvq6_openshift-operators(9179defa-6414-46af-857c-9459169745e5)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-68bc856cb9-tqvq6_openshift-operators(9179defa-6414-46af-857c-9459169745e5)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-tqvq6_openshift-operators_9179defa-6414-46af-857c-9459169745e5_0(e6bb4b29f2c6f67c260659998c19834fe8e58c23dec3143f131ea89968ae4818): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-tqvq6" podUID="9179defa-6414-46af-857c-9459169745e5" Feb 18 00:46:11 crc kubenswrapper[4791]: E0218 00:46:11.128473 4791 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx_openshift-operators_2d01089e-d6f0-4bfb-8ad1-5269e8a912fa_0(e4bf284ee3a2d87a304b0e9ea17f4cc61114962b04e3290e6c10ef51df4df6df): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 18 00:46:11 crc kubenswrapper[4791]: E0218 00:46:11.128598 4791 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx_openshift-operators_2d01089e-d6f0-4bfb-8ad1-5269e8a912fa_0(e4bf284ee3a2d87a304b0e9ea17f4cc61114962b04e3290e6c10ef51df4df6df): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx" Feb 18 00:46:11 crc kubenswrapper[4791]: E0218 00:46:11.128632 4791 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx_openshift-operators_2d01089e-d6f0-4bfb-8ad1-5269e8a912fa_0(e4bf284ee3a2d87a304b0e9ea17f4cc61114962b04e3290e6c10ef51df4df6df): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx" Feb 18 00:46:11 crc kubenswrapper[4791]: E0218 00:46:11.128681 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx_openshift-operators(2d01089e-d6f0-4bfb-8ad1-5269e8a912fa)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx_openshift-operators(2d01089e-d6f0-4bfb-8ad1-5269e8a912fa)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx_openshift-operators_2d01089e-d6f0-4bfb-8ad1-5269e8a912fa_0(e4bf284ee3a2d87a304b0e9ea17f4cc61114962b04e3290e6c10ef51df4df6df): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx" podUID="2d01089e-d6f0-4bfb-8ad1-5269e8a912fa" Feb 18 00:46:11 crc kubenswrapper[4791]: E0218 00:46:11.140691 4791 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-dp9pl_openshift-operators_d77ea4ed-d272-4d64-83a8-b849f88861d1_0(3cbd937a04c7a5113d0febd747fae616fc0f3ff160ebc98ca60b64caf6ea0d79): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Feb 18 00:46:11 crc kubenswrapper[4791]: E0218 00:46:11.140759 4791 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-dp9pl_openshift-operators_d77ea4ed-d272-4d64-83a8-b849f88861d1_0(3cbd937a04c7a5113d0febd747fae616fc0f3ff160ebc98ca60b64caf6ea0d79): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5bf474d74f-dp9pl" Feb 18 00:46:11 crc kubenswrapper[4791]: E0218 00:46:11.140783 4791 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-dp9pl_openshift-operators_d77ea4ed-d272-4d64-83a8-b849f88861d1_0(3cbd937a04c7a5113d0febd747fae616fc0f3ff160ebc98ca60b64caf6ea0d79): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5bf474d74f-dp9pl" Feb 18 00:46:11 crc kubenswrapper[4791]: E0218 00:46:11.140837 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5bf474d74f-dp9pl_openshift-operators(d77ea4ed-d272-4d64-83a8-b849f88861d1)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5bf474d74f-dp9pl_openshift-operators(d77ea4ed-d272-4d64-83a8-b849f88861d1)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-dp9pl_openshift-operators_d77ea4ed-d272-4d64-83a8-b849f88861d1_0(3cbd937a04c7a5113d0febd747fae616fc0f3ff160ebc98ca60b64caf6ea0d79): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/perses-operator-5bf474d74f-dp9pl" podUID="d77ea4ed-d272-4d64-83a8-b849f88861d1" Feb 18 00:46:12 crc kubenswrapper[4791]: I0218 00:46:12.060755 4791 scope.go:117] "RemoveContainer" containerID="fad5ddd370dd4141f0c92d01554dafc2561baba1cf4e086c3add33e0923068b0" Feb 18 00:46:12 crc kubenswrapper[4791]: I0218 00:46:12.631573 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-d2kpn_83bdb769-59eb-4472-ba08-be5897ee2cd6/kube-multus/2.log" Feb 18 00:46:12 crc kubenswrapper[4791]: I0218 00:46:12.632115 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-d2kpn" event={"ID":"83bdb769-59eb-4472-ba08-be5897ee2cd6","Type":"ContainerStarted","Data":"f4a5d47973d2852c5ffac5f0864e40c15ceb113026e2e5c831526094a00ffe3a"} Feb 18 00:46:19 crc kubenswrapper[4791]: I0218 00:46:19.191555 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-rgsmz" Feb 18 00:46:22 crc kubenswrapper[4791]: I0218 00:46:22.060885 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-tdc8p" Feb 18 00:46:22 crc kubenswrapper[4791]: I0218 00:46:22.061531 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-tdc8p" Feb 18 00:46:22 crc kubenswrapper[4791]: I0218 00:46:22.292325 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-tdc8p"] Feb 18 00:46:22 crc kubenswrapper[4791]: I0218 00:46:22.714674 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-59bdc8b94-tdc8p" event={"ID":"503c2fbe-9550-4943-bdf7-ce6372a20de2","Type":"ContainerStarted","Data":"e23e5656e863b9110816e6bf55b1103d4e5655de3312690ab1e10bdc42a5d4c6"} Feb 18 00:46:24 crc kubenswrapper[4791]: I0218 00:46:24.060597 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-tqvq6" Feb 18 00:46:24 crc kubenswrapper[4791]: I0218 00:46:24.061067 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-tqvq6" Feb 18 00:46:24 crc kubenswrapper[4791]: I0218 00:46:24.483366 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-tqvq6"] Feb 18 00:46:24 crc kubenswrapper[4791]: W0218 00:46:24.492455 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9179defa_6414_46af_857c_9459169745e5.slice/crio-2992bbcd1b6831d69c1f9f083a7d13938eda4e7848eb5aeeb52ab7e1be9635ba WatchSource:0}: Error finding container 2992bbcd1b6831d69c1f9f083a7d13938eda4e7848eb5aeeb52ab7e1be9635ba: Status 404 returned error can't find the container with id 2992bbcd1b6831d69c1f9f083a7d13938eda4e7848eb5aeeb52ab7e1be9635ba Feb 18 00:46:24 crc kubenswrapper[4791]: I0218 00:46:24.727843 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-tqvq6" event={"ID":"9179defa-6414-46af-857c-9459169745e5","Type":"ContainerStarted","Data":"2992bbcd1b6831d69c1f9f083a7d13938eda4e7848eb5aeeb52ab7e1be9635ba"} Feb 18 00:46:25 crc kubenswrapper[4791]: I0218 00:46:25.060437 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-dp9pl" Feb 18 00:46:25 crc kubenswrapper[4791]: I0218 00:46:25.060464 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j" Feb 18 00:46:25 crc kubenswrapper[4791]: I0218 00:46:25.061095 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-dp9pl" Feb 18 00:46:25 crc kubenswrapper[4791]: I0218 00:46:25.062229 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j" Feb 18 00:46:26 crc kubenswrapper[4791]: I0218 00:46:26.060510 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx" Feb 18 00:46:26 crc kubenswrapper[4791]: I0218 00:46:26.061130 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx" Feb 18 00:46:27 crc kubenswrapper[4791]: I0218 00:46:27.792391 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j"] Feb 18 00:46:28 crc kubenswrapper[4791]: W0218 00:46:28.297802 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod01b8edc9_ae6f_42d9_8f91_d7918bb3959f.slice/crio-a471db19e8ff33b3cb8846a9b3403f217ef402eecc6cd492e274218cd0f4d0bf WatchSource:0}: Error finding container a471db19e8ff33b3cb8846a9b3403f217ef402eecc6cd492e274218cd0f4d0bf: Status 404 returned error can't find the container with id a471db19e8ff33b3cb8846a9b3403f217ef402eecc6cd492e274218cd0f4d0bf Feb 18 00:46:28 crc kubenswrapper[4791]: I0218 00:46:28.507955 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-dp9pl"] Feb 18 00:46:28 crc kubenswrapper[4791]: W0218 00:46:28.518537 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd77ea4ed_d272_4d64_83a8_b849f88861d1.slice/crio-da184a16282e1722d123dd921117992141a725247f1fb3aeb877abecf869e0f6 WatchSource:0}: Error finding container da184a16282e1722d123dd921117992141a725247f1fb3aeb877abecf869e0f6: Status 404 returned error can't find the container with id da184a16282e1722d123dd921117992141a725247f1fb3aeb877abecf869e0f6 Feb 18 00:46:28 crc kubenswrapper[4791]: I0218 00:46:28.555811 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx"] Feb 18 00:46:28 crc kubenswrapper[4791]: W0218 00:46:28.561017 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2d01089e_d6f0_4bfb_8ad1_5269e8a912fa.slice/crio-1db6812f9bcb25f97f289dc209254cf3388d7f9becedf40ba2d959ad09d57062 WatchSource:0}: Error finding container 1db6812f9bcb25f97f289dc209254cf3388d7f9becedf40ba2d959ad09d57062: Status 404 returned error can't find the container with id 1db6812f9bcb25f97f289dc209254cf3388d7f9becedf40ba2d959ad09d57062 Feb 18 00:46:28 crc kubenswrapper[4791]: I0218 00:46:28.753971 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx" event={"ID":"2d01089e-d6f0-4bfb-8ad1-5269e8a912fa","Type":"ContainerStarted","Data":"1db6812f9bcb25f97f289dc209254cf3388d7f9becedf40ba2d959ad09d57062"} Feb 18 00:46:28 crc kubenswrapper[4791]: I0218 00:46:28.755686 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-59bdc8b94-tdc8p" event={"ID":"503c2fbe-9550-4943-bdf7-ce6372a20de2","Type":"ContainerStarted","Data":"42e252c57528034d6bb4cf37ac5b613a830bbc42b74a396d2f62b9faa2605814"} Feb 18 00:46:28 crc kubenswrapper[4791]: I0218 00:46:28.755916 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-59bdc8b94-tdc8p" Feb 18 00:46:28 crc kubenswrapper[4791]: I0218 00:46:28.757127 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5bf474d74f-dp9pl" event={"ID":"d77ea4ed-d272-4d64-83a8-b849f88861d1","Type":"ContainerStarted","Data":"da184a16282e1722d123dd921117992141a725247f1fb3aeb877abecf869e0f6"} Feb 18 00:46:28 crc kubenswrapper[4791]: I0218 00:46:28.758714 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j" event={"ID":"01b8edc9-ae6f-42d9-8f91-d7918bb3959f","Type":"ContainerStarted","Data":"a471db19e8ff33b3cb8846a9b3403f217ef402eecc6cd492e274218cd0f4d0bf"} Feb 18 00:46:28 crc kubenswrapper[4791]: I0218 00:46:28.760079 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-tqvq6" event={"ID":"9179defa-6414-46af-857c-9459169745e5","Type":"ContainerStarted","Data":"d3a323bbd49b754af3208d8705686b16159603c7cef445573a3639cf396bc39e"} Feb 18 00:46:28 crc kubenswrapper[4791]: I0218 00:46:28.784652 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-59bdc8b94-tdc8p" podStartSLOduration=29.684943343 podStartE2EDuration="35.784629902s" podCreationTimestamp="2026-02-18 00:45:53 +0000 UTC" firstStartedPulling="2026-02-18 00:46:22.296577253 +0000 UTC m=+723.864590443" lastFinishedPulling="2026-02-18 00:46:28.396263832 +0000 UTC m=+729.964277002" observedRunningTime="2026-02-18 00:46:28.77547398 +0000 UTC m=+730.343487150" watchObservedRunningTime="2026-02-18 00:46:28.784629902 +0000 UTC m=+730.352643082" Feb 18 00:46:28 crc kubenswrapper[4791]: I0218 00:46:28.792480 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-59bdc8b94-tdc8p" Feb 18 00:46:28 crc kubenswrapper[4791]: I0218 00:46:28.808050 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-tqvq6" podStartSLOduration=31.934047675 podStartE2EDuration="35.808033881s" podCreationTimestamp="2026-02-18 00:45:53 +0000 UTC" firstStartedPulling="2026-02-18 00:46:24.494903424 +0000 UTC m=+726.062916594" lastFinishedPulling="2026-02-18 00:46:28.36888963 +0000 UTC m=+729.936902800" observedRunningTime="2026-02-18 00:46:28.803084248 +0000 UTC m=+730.371097448" watchObservedRunningTime="2026-02-18 00:46:28.808033881 +0000 UTC m=+730.376047071" Feb 18 00:46:31 crc kubenswrapper[4791]: I0218 00:46:31.776623 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j" 
event={"ID":"01b8edc9-ae6f-42d9-8f91-d7918bb3959f","Type":"ContainerStarted","Data":"5f8538f26a739c8f17040e43fb60aad29f2cd1cf64d6d66c87a774ff9138fad9"} Feb 18 00:46:31 crc kubenswrapper[4791]: I0218 00:46:31.778378 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx" event={"ID":"2d01089e-d6f0-4bfb-8ad1-5269e8a912fa","Type":"ContainerStarted","Data":"0a78d4378500447d88dc9bc6567db1f573b36282e83a40ba0e232df959aab7e2"} Feb 18 00:46:31 crc kubenswrapper[4791]: I0218 00:46:31.779353 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5bf474d74f-dp9pl" event={"ID":"d77ea4ed-d272-4d64-83a8-b849f88861d1","Type":"ContainerStarted","Data":"50020da059ffe71453970c9301ae89718d8f895f28f41a3a396ce14b126fb250"} Feb 18 00:46:31 crc kubenswrapper[4791]: I0218 00:46:31.779480 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5bf474d74f-dp9pl" Feb 18 00:46:31 crc kubenswrapper[4791]: I0218 00:46:31.794446 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j" podStartSLOduration=35.870822639 podStartE2EDuration="38.794431001s" podCreationTimestamp="2026-02-18 00:45:53 +0000 UTC" firstStartedPulling="2026-02-18 00:46:28.338403433 +0000 UTC m=+729.906416603" lastFinishedPulling="2026-02-18 00:46:31.262011795 +0000 UTC m=+732.830024965" observedRunningTime="2026-02-18 00:46:31.790480639 +0000 UTC m=+733.358493829" watchObservedRunningTime="2026-02-18 00:46:31.794431001 +0000 UTC m=+733.362444171" Feb 18 00:46:31 crc kubenswrapper[4791]: I0218 00:46:31.814072 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx" podStartSLOduration=36.103914179 podStartE2EDuration="38.814017722s" podCreationTimestamp="2026-02-18 00:45:53 +0000 UTC" firstStartedPulling="2026-02-18 00:46:28.563736956 +0000 UTC m=+730.131750126" lastFinishedPulling="2026-02-18 00:46:31.273840479 +0000 UTC m=+732.841853669" observedRunningTime="2026-02-18 00:46:31.810277587 +0000 UTC m=+733.378290757" watchObservedRunningTime="2026-02-18 00:46:31.814017722 +0000 UTC m=+733.382030892" Feb 18 00:46:31 crc kubenswrapper[4791]: I0218 00:46:31.831295 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-5bf474d74f-dp9pl" podStartSLOduration=35.093006085 podStartE2EDuration="37.831266122s" podCreationTimestamp="2026-02-18 00:45:54 +0000 UTC" firstStartedPulling="2026-02-18 00:46:28.522673185 +0000 UTC m=+730.090686355" lastFinishedPulling="2026-02-18 00:46:31.260933222 +0000 UTC m=+732.828946392" observedRunningTime="2026-02-18 00:46:31.831031815 +0000 UTC m=+733.399044985" watchObservedRunningTime="2026-02-18 00:46:31.831266122 +0000 UTC m=+733.399279292" Feb 18 00:46:34 crc kubenswrapper[4791]: I0218 00:46:34.690893 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-8fnj2"] Feb 18 00:46:34 crc kubenswrapper[4791]: I0218 00:46:34.691913 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-cainjector-cf98fcc89-8fnj2" Feb 18 00:46:34 crc kubenswrapper[4791]: I0218 00:46:34.698840 4791 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-d8gmz" Feb 18 00:46:34 crc kubenswrapper[4791]: I0218 00:46:34.698950 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Feb 18 00:46:34 crc kubenswrapper[4791]: I0218 00:46:34.703182 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Feb 18 00:46:34 crc kubenswrapper[4791]: I0218 00:46:34.707487 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-8fnj2"] Feb 18 00:46:34 crc kubenswrapper[4791]: I0218 00:46:34.721277 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-858654f9db-pt89z"] Feb 18 00:46:34 crc kubenswrapper[4791]: I0218 00:46:34.722077 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-858654f9db-pt89z" Feb 18 00:46:34 crc kubenswrapper[4791]: I0218 00:46:34.723845 4791 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-jdt2c" Feb 18 00:46:34 crc kubenswrapper[4791]: I0218 00:46:34.731735 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-hk8hj"] Feb 18 00:46:34 crc kubenswrapper[4791]: I0218 00:46:34.732649 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-687f57d79b-hk8hj" Feb 18 00:46:34 crc kubenswrapper[4791]: I0218 00:46:34.735139 4791 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-thd2d" Feb 18 00:46:34 crc kubenswrapper[4791]: I0218 00:46:34.736264 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-858654f9db-pt89z"] Feb 18 00:46:34 crc kubenswrapper[4791]: I0218 00:46:34.741020 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-hk8hj"] Feb 18 00:46:34 crc kubenswrapper[4791]: I0218 00:46:34.849109 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-th8wr\" (UniqueName: \"kubernetes.io/projected/7dfbc107-3119-4d6c-94a7-f968eadcfa14-kube-api-access-th8wr\") pod \"cert-manager-cainjector-cf98fcc89-8fnj2\" (UID: \"7dfbc107-3119-4d6c-94a7-f968eadcfa14\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-8fnj2" Feb 18 00:46:34 crc kubenswrapper[4791]: I0218 00:46:34.849404 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hxjp6\" (UniqueName: \"kubernetes.io/projected/d18c7aea-395c-4dc2-a8be-885128a49c08-kube-api-access-hxjp6\") pod \"cert-manager-858654f9db-pt89z\" (UID: \"d18c7aea-395c-4dc2-a8be-885128a49c08\") " pod="cert-manager/cert-manager-858654f9db-pt89z" Feb 18 00:46:34 crc kubenswrapper[4791]: I0218 00:46:34.849522 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8klxx\" (UniqueName: \"kubernetes.io/projected/290b1843-b2ad-42d9-ba7e-0c58aa50cc00-kube-api-access-8klxx\") pod \"cert-manager-webhook-687f57d79b-hk8hj\" (UID: \"290b1843-b2ad-42d9-ba7e-0c58aa50cc00\") " pod="cert-manager/cert-manager-webhook-687f57d79b-hk8hj" Feb 18 00:46:34 crc 
kubenswrapper[4791]: I0218 00:46:34.951191 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8klxx\" (UniqueName: \"kubernetes.io/projected/290b1843-b2ad-42d9-ba7e-0c58aa50cc00-kube-api-access-8klxx\") pod \"cert-manager-webhook-687f57d79b-hk8hj\" (UID: \"290b1843-b2ad-42d9-ba7e-0c58aa50cc00\") " pod="cert-manager/cert-manager-webhook-687f57d79b-hk8hj" Feb 18 00:46:34 crc kubenswrapper[4791]: I0218 00:46:34.951244 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-th8wr\" (UniqueName: \"kubernetes.io/projected/7dfbc107-3119-4d6c-94a7-f968eadcfa14-kube-api-access-th8wr\") pod \"cert-manager-cainjector-cf98fcc89-8fnj2\" (UID: \"7dfbc107-3119-4d6c-94a7-f968eadcfa14\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-8fnj2" Feb 18 00:46:34 crc kubenswrapper[4791]: I0218 00:46:34.951304 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hxjp6\" (UniqueName: \"kubernetes.io/projected/d18c7aea-395c-4dc2-a8be-885128a49c08-kube-api-access-hxjp6\") pod \"cert-manager-858654f9db-pt89z\" (UID: \"d18c7aea-395c-4dc2-a8be-885128a49c08\") " pod="cert-manager/cert-manager-858654f9db-pt89z" Feb 18 00:46:34 crc kubenswrapper[4791]: I0218 00:46:34.969424 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8klxx\" (UniqueName: \"kubernetes.io/projected/290b1843-b2ad-42d9-ba7e-0c58aa50cc00-kube-api-access-8klxx\") pod \"cert-manager-webhook-687f57d79b-hk8hj\" (UID: \"290b1843-b2ad-42d9-ba7e-0c58aa50cc00\") " pod="cert-manager/cert-manager-webhook-687f57d79b-hk8hj" Feb 18 00:46:34 crc kubenswrapper[4791]: I0218 00:46:34.969446 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-th8wr\" (UniqueName: \"kubernetes.io/projected/7dfbc107-3119-4d6c-94a7-f968eadcfa14-kube-api-access-th8wr\") pod \"cert-manager-cainjector-cf98fcc89-8fnj2\" (UID: \"7dfbc107-3119-4d6c-94a7-f968eadcfa14\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-8fnj2" Feb 18 00:46:34 crc kubenswrapper[4791]: I0218 00:46:34.971352 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hxjp6\" (UniqueName: \"kubernetes.io/projected/d18c7aea-395c-4dc2-a8be-885128a49c08-kube-api-access-hxjp6\") pod \"cert-manager-858654f9db-pt89z\" (UID: \"d18c7aea-395c-4dc2-a8be-885128a49c08\") " pod="cert-manager/cert-manager-858654f9db-pt89z" Feb 18 00:46:35 crc kubenswrapper[4791]: I0218 00:46:35.009381 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-cf98fcc89-8fnj2" Feb 18 00:46:35 crc kubenswrapper[4791]: I0218 00:46:35.039889 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-858654f9db-pt89z" Feb 18 00:46:35 crc kubenswrapper[4791]: I0218 00:46:35.054048 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-687f57d79b-hk8hj" Feb 18 00:46:35 crc kubenswrapper[4791]: I0218 00:46:35.433992 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-8fnj2"] Feb 18 00:46:35 crc kubenswrapper[4791]: W0218 00:46:35.438116 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7dfbc107_3119_4d6c_94a7_f968eadcfa14.slice/crio-d30cd434037715063c04e2004bb2357dfec7c0c02fb25eee7ed19d4d9d742455 WatchSource:0}: Error finding container d30cd434037715063c04e2004bb2357dfec7c0c02fb25eee7ed19d4d9d742455: Status 404 returned error can't find the container with id d30cd434037715063c04e2004bb2357dfec7c0c02fb25eee7ed19d4d9d742455 Feb 18 00:46:35 crc kubenswrapper[4791]: I0218 00:46:35.485651 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-858654f9db-pt89z"] Feb 18 00:46:35 crc kubenswrapper[4791]: I0218 00:46:35.520344 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-hk8hj"] Feb 18 00:46:35 crc kubenswrapper[4791]: W0218 00:46:35.523966 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod290b1843_b2ad_42d9_ba7e_0c58aa50cc00.slice/crio-4e42214d4d6b44d4c6457728e542b921d11a37224e481f46230bd9454bd9c2a0 WatchSource:0}: Error finding container 4e42214d4d6b44d4c6457728e542b921d11a37224e481f46230bd9454bd9c2a0: Status 404 returned error can't find the container with id 4e42214d4d6b44d4c6457728e542b921d11a37224e481f46230bd9454bd9c2a0 Feb 18 00:46:35 crc kubenswrapper[4791]: I0218 00:46:35.803872 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-858654f9db-pt89z" event={"ID":"d18c7aea-395c-4dc2-a8be-885128a49c08","Type":"ContainerStarted","Data":"60122b21338b9cf3709d1062c1cfae9eef27ce16ed6890f65267e26c830f310c"} Feb 18 00:46:35 crc kubenswrapper[4791]: I0218 00:46:35.805253 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-687f57d79b-hk8hj" event={"ID":"290b1843-b2ad-42d9-ba7e-0c58aa50cc00","Type":"ContainerStarted","Data":"4e42214d4d6b44d4c6457728e542b921d11a37224e481f46230bd9454bd9c2a0"} Feb 18 00:46:35 crc kubenswrapper[4791]: I0218 00:46:35.806265 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-cf98fcc89-8fnj2" event={"ID":"7dfbc107-3119-4d6c-94a7-f968eadcfa14","Type":"ContainerStarted","Data":"d30cd434037715063c04e2004bb2357dfec7c0c02fb25eee7ed19d4d9d742455"} Feb 18 00:46:39 crc kubenswrapper[4791]: I0218 00:46:39.841382 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-858654f9db-pt89z" event={"ID":"d18c7aea-395c-4dc2-a8be-885128a49c08","Type":"ContainerStarted","Data":"ac6e13f6a27113970d5030a321ee1c0c77e2df57000625718cf5ba8c68d41876"} Feb 18 00:46:39 crc kubenswrapper[4791]: I0218 00:46:39.845463 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-687f57d79b-hk8hj" event={"ID":"290b1843-b2ad-42d9-ba7e-0c58aa50cc00","Type":"ContainerStarted","Data":"0de5c491312dbed5873cd677491eddf5d7cb69deda8fa356d06ecc3b4847d6e9"} Feb 18 00:46:39 crc kubenswrapper[4791]: I0218 00:46:39.845606 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-687f57d79b-hk8hj" Feb 18 00:46:39 crc kubenswrapper[4791]: I0218 
00:46:39.847113 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-cf98fcc89-8fnj2" event={"ID":"7dfbc107-3119-4d6c-94a7-f968eadcfa14","Type":"ContainerStarted","Data":"0beb2ff86b861329048ecdeca836dbe2e5590e58f5058194855089811fc5745f"} Feb 18 00:46:39 crc kubenswrapper[4791]: I0218 00:46:39.861787 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-858654f9db-pt89z" podStartSLOduration=1.798141791 podStartE2EDuration="5.861770724s" podCreationTimestamp="2026-02-18 00:46:34 +0000 UTC" firstStartedPulling="2026-02-18 00:46:35.494210905 +0000 UTC m=+737.062224075" lastFinishedPulling="2026-02-18 00:46:39.557839838 +0000 UTC m=+741.125853008" observedRunningTime="2026-02-18 00:46:39.857677129 +0000 UTC m=+741.425690309" watchObservedRunningTime="2026-02-18 00:46:39.861770724 +0000 UTC m=+741.429783894" Feb 18 00:46:39 crc kubenswrapper[4791]: I0218 00:46:39.882625 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-687f57d79b-hk8hj" podStartSLOduration=1.798703439 podStartE2EDuration="5.882600864s" podCreationTimestamp="2026-02-18 00:46:34 +0000 UTC" firstStartedPulling="2026-02-18 00:46:35.525997042 +0000 UTC m=+737.094010212" lastFinishedPulling="2026-02-18 00:46:39.609894467 +0000 UTC m=+741.177907637" observedRunningTime="2026-02-18 00:46:39.881617304 +0000 UTC m=+741.449630474" watchObservedRunningTime="2026-02-18 00:46:39.882600864 +0000 UTC m=+741.450614044" Feb 18 00:46:39 crc kubenswrapper[4791]: I0218 00:46:39.904098 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-cf98fcc89-8fnj2" podStartSLOduration=1.781292254 podStartE2EDuration="5.904069564s" podCreationTimestamp="2026-02-18 00:46:34 +0000 UTC" firstStartedPulling="2026-02-18 00:46:35.440616089 +0000 UTC m=+737.008629259" lastFinishedPulling="2026-02-18 00:46:39.563393399 +0000 UTC m=+741.131406569" observedRunningTime="2026-02-18 00:46:39.89384937 +0000 UTC m=+741.461862550" watchObservedRunningTime="2026-02-18 00:46:39.904069564 +0000 UTC m=+741.472082754" Feb 18 00:46:44 crc kubenswrapper[4791]: I0218 00:46:44.539019 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5bf474d74f-dp9pl" Feb 18 00:46:45 crc kubenswrapper[4791]: I0218 00:46:45.057064 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-687f57d79b-hk8hj" Feb 18 00:47:05 crc kubenswrapper[4791]: I0218 00:47:05.413526 4791 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Feb 18 00:47:07 crc kubenswrapper[4791]: I0218 00:47:07.592059 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19cd857"] Feb 18 00:47:07 crc kubenswrapper[4791]: I0218 00:47:07.593576 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19cd857" Feb 18 00:47:07 crc kubenswrapper[4791]: I0218 00:47:07.595315 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Feb 18 00:47:07 crc kubenswrapper[4791]: I0218 00:47:07.604205 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19cd857"] Feb 18 00:47:07 crc kubenswrapper[4791]: I0218 00:47:07.785722 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a6691e14-8a04-46ac-94cb-cbcfa2894bb8-bundle\") pod \"371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19cd857\" (UID: \"a6691e14-8a04-46ac-94cb-cbcfa2894bb8\") " pod="openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19cd857" Feb 18 00:47:07 crc kubenswrapper[4791]: I0218 00:47:07.785776 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ls7r8\" (UniqueName: \"kubernetes.io/projected/a6691e14-8a04-46ac-94cb-cbcfa2894bb8-kube-api-access-ls7r8\") pod \"371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19cd857\" (UID: \"a6691e14-8a04-46ac-94cb-cbcfa2894bb8\") " pod="openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19cd857" Feb 18 00:47:07 crc kubenswrapper[4791]: I0218 00:47:07.785795 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a6691e14-8a04-46ac-94cb-cbcfa2894bb8-util\") pod \"371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19cd857\" (UID: \"a6691e14-8a04-46ac-94cb-cbcfa2894bb8\") " pod="openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19cd857" Feb 18 00:47:07 crc kubenswrapper[4791]: I0218 00:47:07.831639 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e089899v296"] Feb 18 00:47:07 crc kubenswrapper[4791]: I0218 00:47:07.832752 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e089899v296" Feb 18 00:47:07 crc kubenswrapper[4791]: I0218 00:47:07.849348 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e089899v296"] Feb 18 00:47:07 crc kubenswrapper[4791]: I0218 00:47:07.887701 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/354c26d0-70dc-44dd-a18c-362d7fa5b5e9-util\") pod \"e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e089899v296\" (UID: \"354c26d0-70dc-44dd-a18c-362d7fa5b5e9\") " pod="openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e089899v296" Feb 18 00:47:07 crc kubenswrapper[4791]: I0218 00:47:07.887762 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2qk8f\" (UniqueName: \"kubernetes.io/projected/354c26d0-70dc-44dd-a18c-362d7fa5b5e9-kube-api-access-2qk8f\") pod \"e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e089899v296\" (UID: \"354c26d0-70dc-44dd-a18c-362d7fa5b5e9\") " pod="openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e089899v296" Feb 18 00:47:07 crc kubenswrapper[4791]: I0218 00:47:07.887789 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a6691e14-8a04-46ac-94cb-cbcfa2894bb8-bundle\") pod \"371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19cd857\" (UID: \"a6691e14-8a04-46ac-94cb-cbcfa2894bb8\") " pod="openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19cd857" Feb 18 00:47:07 crc kubenswrapper[4791]: I0218 00:47:07.887846 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ls7r8\" (UniqueName: \"kubernetes.io/projected/a6691e14-8a04-46ac-94cb-cbcfa2894bb8-kube-api-access-ls7r8\") pod \"371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19cd857\" (UID: \"a6691e14-8a04-46ac-94cb-cbcfa2894bb8\") " pod="openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19cd857" Feb 18 00:47:07 crc kubenswrapper[4791]: I0218 00:47:07.887875 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a6691e14-8a04-46ac-94cb-cbcfa2894bb8-util\") pod \"371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19cd857\" (UID: \"a6691e14-8a04-46ac-94cb-cbcfa2894bb8\") " pod="openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19cd857" Feb 18 00:47:07 crc kubenswrapper[4791]: I0218 00:47:07.887934 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/354c26d0-70dc-44dd-a18c-362d7fa5b5e9-bundle\") pod \"e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e089899v296\" (UID: \"354c26d0-70dc-44dd-a18c-362d7fa5b5e9\") " pod="openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e089899v296" Feb 18 00:47:07 crc kubenswrapper[4791]: I0218 00:47:07.888200 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a6691e14-8a04-46ac-94cb-cbcfa2894bb8-bundle\") pod \"371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19cd857\" (UID: \"a6691e14-8a04-46ac-94cb-cbcfa2894bb8\") " 
pod="openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19cd857" Feb 18 00:47:07 crc kubenswrapper[4791]: I0218 00:47:07.889296 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a6691e14-8a04-46ac-94cb-cbcfa2894bb8-util\") pod \"371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19cd857\" (UID: \"a6691e14-8a04-46ac-94cb-cbcfa2894bb8\") " pod="openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19cd857" Feb 18 00:47:07 crc kubenswrapper[4791]: I0218 00:47:07.906574 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ls7r8\" (UniqueName: \"kubernetes.io/projected/a6691e14-8a04-46ac-94cb-cbcfa2894bb8-kube-api-access-ls7r8\") pod \"371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19cd857\" (UID: \"a6691e14-8a04-46ac-94cb-cbcfa2894bb8\") " pod="openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19cd857" Feb 18 00:47:07 crc kubenswrapper[4791]: I0218 00:47:07.908438 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19cd857" Feb 18 00:47:07 crc kubenswrapper[4791]: I0218 00:47:07.988614 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/354c26d0-70dc-44dd-a18c-362d7fa5b5e9-bundle\") pod \"e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e089899v296\" (UID: \"354c26d0-70dc-44dd-a18c-362d7fa5b5e9\") " pod="openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e089899v296" Feb 18 00:47:07 crc kubenswrapper[4791]: I0218 00:47:07.988680 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/354c26d0-70dc-44dd-a18c-362d7fa5b5e9-util\") pod \"e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e089899v296\" (UID: \"354c26d0-70dc-44dd-a18c-362d7fa5b5e9\") " pod="openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e089899v296" Feb 18 00:47:07 crc kubenswrapper[4791]: I0218 00:47:07.988704 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2qk8f\" (UniqueName: \"kubernetes.io/projected/354c26d0-70dc-44dd-a18c-362d7fa5b5e9-kube-api-access-2qk8f\") pod \"e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e089899v296\" (UID: \"354c26d0-70dc-44dd-a18c-362d7fa5b5e9\") " pod="openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e089899v296" Feb 18 00:47:07 crc kubenswrapper[4791]: I0218 00:47:07.989102 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/354c26d0-70dc-44dd-a18c-362d7fa5b5e9-bundle\") pod \"e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e089899v296\" (UID: \"354c26d0-70dc-44dd-a18c-362d7fa5b5e9\") " pod="openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e089899v296" Feb 18 00:47:07 crc kubenswrapper[4791]: I0218 00:47:07.989230 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/354c26d0-70dc-44dd-a18c-362d7fa5b5e9-util\") pod \"e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e089899v296\" (UID: \"354c26d0-70dc-44dd-a18c-362d7fa5b5e9\") " pod="openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e089899v296" Feb 18 00:47:08 crc 
kubenswrapper[4791]: I0218 00:47:08.006842 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2qk8f\" (UniqueName: \"kubernetes.io/projected/354c26d0-70dc-44dd-a18c-362d7fa5b5e9-kube-api-access-2qk8f\") pod \"e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e089899v296\" (UID: \"354c26d0-70dc-44dd-a18c-362d7fa5b5e9\") " pod="openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e089899v296" Feb 18 00:47:08 crc kubenswrapper[4791]: I0218 00:47:08.146977 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e089899v296" Feb 18 00:47:08 crc kubenswrapper[4791]: I0218 00:47:08.316106 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e089899v296"] Feb 18 00:47:08 crc kubenswrapper[4791]: I0218 00:47:08.378744 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19cd857"] Feb 18 00:47:08 crc kubenswrapper[4791]: W0218 00:47:08.393668 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda6691e14_8a04_46ac_94cb_cbcfa2894bb8.slice/crio-6820d2ef53db76308c03c661cd079bb3937e71922f9fcabbd29d7f5ee755bf95 WatchSource:0}: Error finding container 6820d2ef53db76308c03c661cd079bb3937e71922f9fcabbd29d7f5ee755bf95: Status 404 returned error can't find the container with id 6820d2ef53db76308c03c661cd079bb3937e71922f9fcabbd29d7f5ee755bf95 Feb 18 00:47:09 crc kubenswrapper[4791]: I0218 00:47:09.033556 4791 generic.go:334] "Generic (PLEG): container finished" podID="354c26d0-70dc-44dd-a18c-362d7fa5b5e9" containerID="0324c4a33079a5b322092fccca8c721e983db788070e5b701df92b89eee03d8d" exitCode=0 Feb 18 00:47:09 crc kubenswrapper[4791]: I0218 00:47:09.033598 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e089899v296" event={"ID":"354c26d0-70dc-44dd-a18c-362d7fa5b5e9","Type":"ContainerDied","Data":"0324c4a33079a5b322092fccca8c721e983db788070e5b701df92b89eee03d8d"} Feb 18 00:47:09 crc kubenswrapper[4791]: I0218 00:47:09.033644 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e089899v296" event={"ID":"354c26d0-70dc-44dd-a18c-362d7fa5b5e9","Type":"ContainerStarted","Data":"dd9372672f6e88f0b38048a91953b9a1e687a4cfd4c41cd258d9747873f8449a"} Feb 18 00:47:09 crc kubenswrapper[4791]: I0218 00:47:09.036130 4791 generic.go:334] "Generic (PLEG): container finished" podID="a6691e14-8a04-46ac-94cb-cbcfa2894bb8" containerID="6278a5b032c7646232c0aaa2bc8c244b29784ae43626e52e29394163d736e32d" exitCode=0 Feb 18 00:47:09 crc kubenswrapper[4791]: I0218 00:47:09.036208 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19cd857" event={"ID":"a6691e14-8a04-46ac-94cb-cbcfa2894bb8","Type":"ContainerDied","Data":"6278a5b032c7646232c0aaa2bc8c244b29784ae43626e52e29394163d736e32d"} Feb 18 00:47:09 crc kubenswrapper[4791]: I0218 00:47:09.036248 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19cd857" 
event={"ID":"a6691e14-8a04-46ac-94cb-cbcfa2894bb8","Type":"ContainerStarted","Data":"6820d2ef53db76308c03c661cd079bb3937e71922f9fcabbd29d7f5ee755bf95"} Feb 18 00:47:11 crc kubenswrapper[4791]: I0218 00:47:11.050000 4791 generic.go:334] "Generic (PLEG): container finished" podID="354c26d0-70dc-44dd-a18c-362d7fa5b5e9" containerID="b3120263acd62d19fe96200185976c5328b21e2fa5420cba9ff2791d84ffcb15" exitCode=0 Feb 18 00:47:11 crc kubenswrapper[4791]: I0218 00:47:11.050057 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e089899v296" event={"ID":"354c26d0-70dc-44dd-a18c-362d7fa5b5e9","Type":"ContainerDied","Data":"b3120263acd62d19fe96200185976c5328b21e2fa5420cba9ff2791d84ffcb15"} Feb 18 00:47:11 crc kubenswrapper[4791]: I0218 00:47:11.052643 4791 generic.go:334] "Generic (PLEG): container finished" podID="a6691e14-8a04-46ac-94cb-cbcfa2894bb8" containerID="c2c9e2bf9259ec2af218ea7a8d7457bf293095d853fcfcf07bf6174d2401f536" exitCode=0 Feb 18 00:47:11 crc kubenswrapper[4791]: I0218 00:47:11.052686 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19cd857" event={"ID":"a6691e14-8a04-46ac-94cb-cbcfa2894bb8","Type":"ContainerDied","Data":"c2c9e2bf9259ec2af218ea7a8d7457bf293095d853fcfcf07bf6174d2401f536"} Feb 18 00:47:11 crc kubenswrapper[4791]: I0218 00:47:11.350515 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wjrpz"] Feb 18 00:47:11 crc kubenswrapper[4791]: I0218 00:47:11.352582 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wjrpz" Feb 18 00:47:11 crc kubenswrapper[4791]: I0218 00:47:11.360321 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wjrpz"] Feb 18 00:47:11 crc kubenswrapper[4791]: I0218 00:47:11.544054 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5tv2l\" (UniqueName: \"kubernetes.io/projected/804281fd-724a-482e-b71e-5b554064df31-kube-api-access-5tv2l\") pod \"redhat-operators-wjrpz\" (UID: \"804281fd-724a-482e-b71e-5b554064df31\") " pod="openshift-marketplace/redhat-operators-wjrpz" Feb 18 00:47:11 crc kubenswrapper[4791]: I0218 00:47:11.544148 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/804281fd-724a-482e-b71e-5b554064df31-utilities\") pod \"redhat-operators-wjrpz\" (UID: \"804281fd-724a-482e-b71e-5b554064df31\") " pod="openshift-marketplace/redhat-operators-wjrpz" Feb 18 00:47:11 crc kubenswrapper[4791]: I0218 00:47:11.544250 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/804281fd-724a-482e-b71e-5b554064df31-catalog-content\") pod \"redhat-operators-wjrpz\" (UID: \"804281fd-724a-482e-b71e-5b554064df31\") " pod="openshift-marketplace/redhat-operators-wjrpz" Feb 18 00:47:11 crc kubenswrapper[4791]: I0218 00:47:11.646987 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/804281fd-724a-482e-b71e-5b554064df31-catalog-content\") pod \"redhat-operators-wjrpz\" (UID: \"804281fd-724a-482e-b71e-5b554064df31\") " pod="openshift-marketplace/redhat-operators-wjrpz" Feb 18 00:47:11 crc 
kubenswrapper[4791]: I0218 00:47:11.647044 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5tv2l\" (UniqueName: \"kubernetes.io/projected/804281fd-724a-482e-b71e-5b554064df31-kube-api-access-5tv2l\") pod \"redhat-operators-wjrpz\" (UID: \"804281fd-724a-482e-b71e-5b554064df31\") " pod="openshift-marketplace/redhat-operators-wjrpz" Feb 18 00:47:11 crc kubenswrapper[4791]: I0218 00:47:11.647099 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/804281fd-724a-482e-b71e-5b554064df31-utilities\") pod \"redhat-operators-wjrpz\" (UID: \"804281fd-724a-482e-b71e-5b554064df31\") " pod="openshift-marketplace/redhat-operators-wjrpz" Feb 18 00:47:11 crc kubenswrapper[4791]: I0218 00:47:11.647488 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/804281fd-724a-482e-b71e-5b554064df31-catalog-content\") pod \"redhat-operators-wjrpz\" (UID: \"804281fd-724a-482e-b71e-5b554064df31\") " pod="openshift-marketplace/redhat-operators-wjrpz" Feb 18 00:47:11 crc kubenswrapper[4791]: I0218 00:47:11.647610 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/804281fd-724a-482e-b71e-5b554064df31-utilities\") pod \"redhat-operators-wjrpz\" (UID: \"804281fd-724a-482e-b71e-5b554064df31\") " pod="openshift-marketplace/redhat-operators-wjrpz" Feb 18 00:47:11 crc kubenswrapper[4791]: I0218 00:47:11.684121 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5tv2l\" (UniqueName: \"kubernetes.io/projected/804281fd-724a-482e-b71e-5b554064df31-kube-api-access-5tv2l\") pod \"redhat-operators-wjrpz\" (UID: \"804281fd-724a-482e-b71e-5b554064df31\") " pod="openshift-marketplace/redhat-operators-wjrpz" Feb 18 00:47:11 crc kubenswrapper[4791]: I0218 00:47:11.972081 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wjrpz" Feb 18 00:47:12 crc kubenswrapper[4791]: I0218 00:47:12.067461 4791 generic.go:334] "Generic (PLEG): container finished" podID="a6691e14-8a04-46ac-94cb-cbcfa2894bb8" containerID="314a1a3b4685bc552440c426808a3641538c4f82397ef7c35102719ab7187d62" exitCode=0 Feb 18 00:47:12 crc kubenswrapper[4791]: I0218 00:47:12.067807 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19cd857" event={"ID":"a6691e14-8a04-46ac-94cb-cbcfa2894bb8","Type":"ContainerDied","Data":"314a1a3b4685bc552440c426808a3641538c4f82397ef7c35102719ab7187d62"} Feb 18 00:47:12 crc kubenswrapper[4791]: I0218 00:47:12.075322 4791 generic.go:334] "Generic (PLEG): container finished" podID="354c26d0-70dc-44dd-a18c-362d7fa5b5e9" containerID="c679c2a84f0127cac2136933f688177f814986dc8a008a7e11fbede5785d3328" exitCode=0 Feb 18 00:47:12 crc kubenswrapper[4791]: I0218 00:47:12.075366 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e089899v296" event={"ID":"354c26d0-70dc-44dd-a18c-362d7fa5b5e9","Type":"ContainerDied","Data":"c679c2a84f0127cac2136933f688177f814986dc8a008a7e11fbede5785d3328"} Feb 18 00:47:12 crc kubenswrapper[4791]: I0218 00:47:12.244667 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wjrpz"] Feb 18 00:47:13 crc kubenswrapper[4791]: I0218 00:47:13.082390 4791 generic.go:334] "Generic (PLEG): container finished" podID="804281fd-724a-482e-b71e-5b554064df31" containerID="637ce396e9b333951677c2aa49aafaa02b2cb13db12fa2fe30df02cabcdc34e2" exitCode=0 Feb 18 00:47:13 crc kubenswrapper[4791]: I0218 00:47:13.082484 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wjrpz" event={"ID":"804281fd-724a-482e-b71e-5b554064df31","Type":"ContainerDied","Data":"637ce396e9b333951677c2aa49aafaa02b2cb13db12fa2fe30df02cabcdc34e2"} Feb 18 00:47:13 crc kubenswrapper[4791]: I0218 00:47:13.082713 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wjrpz" event={"ID":"804281fd-724a-482e-b71e-5b554064df31","Type":"ContainerStarted","Data":"f8161cdf1be293e14de8062656ee3693d59a7221e3fb5efe28c0cb41b6391243"} Feb 18 00:47:13 crc kubenswrapper[4791]: I0218 00:47:13.398501 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19cd857" Feb 18 00:47:13 crc kubenswrapper[4791]: I0218 00:47:13.414477 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e089899v296" Feb 18 00:47:13 crc kubenswrapper[4791]: I0218 00:47:13.586620 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/354c26d0-70dc-44dd-a18c-362d7fa5b5e9-bundle\") pod \"354c26d0-70dc-44dd-a18c-362d7fa5b5e9\" (UID: \"354c26d0-70dc-44dd-a18c-362d7fa5b5e9\") " Feb 18 00:47:13 crc kubenswrapper[4791]: I0218 00:47:13.586687 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2qk8f\" (UniqueName: \"kubernetes.io/projected/354c26d0-70dc-44dd-a18c-362d7fa5b5e9-kube-api-access-2qk8f\") pod \"354c26d0-70dc-44dd-a18c-362d7fa5b5e9\" (UID: \"354c26d0-70dc-44dd-a18c-362d7fa5b5e9\") " Feb 18 00:47:13 crc kubenswrapper[4791]: I0218 00:47:13.586742 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ls7r8\" (UniqueName: \"kubernetes.io/projected/a6691e14-8a04-46ac-94cb-cbcfa2894bb8-kube-api-access-ls7r8\") pod \"a6691e14-8a04-46ac-94cb-cbcfa2894bb8\" (UID: \"a6691e14-8a04-46ac-94cb-cbcfa2894bb8\") " Feb 18 00:47:13 crc kubenswrapper[4791]: I0218 00:47:13.586791 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/354c26d0-70dc-44dd-a18c-362d7fa5b5e9-util\") pod \"354c26d0-70dc-44dd-a18c-362d7fa5b5e9\" (UID: \"354c26d0-70dc-44dd-a18c-362d7fa5b5e9\") " Feb 18 00:47:13 crc kubenswrapper[4791]: I0218 00:47:13.586843 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a6691e14-8a04-46ac-94cb-cbcfa2894bb8-bundle\") pod \"a6691e14-8a04-46ac-94cb-cbcfa2894bb8\" (UID: \"a6691e14-8a04-46ac-94cb-cbcfa2894bb8\") " Feb 18 00:47:13 crc kubenswrapper[4791]: I0218 00:47:13.586863 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a6691e14-8a04-46ac-94cb-cbcfa2894bb8-util\") pod \"a6691e14-8a04-46ac-94cb-cbcfa2894bb8\" (UID: \"a6691e14-8a04-46ac-94cb-cbcfa2894bb8\") " Feb 18 00:47:13 crc kubenswrapper[4791]: I0218 00:47:13.588686 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/354c26d0-70dc-44dd-a18c-362d7fa5b5e9-bundle" (OuterVolumeSpecName: "bundle") pod "354c26d0-70dc-44dd-a18c-362d7fa5b5e9" (UID: "354c26d0-70dc-44dd-a18c-362d7fa5b5e9"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:47:13 crc kubenswrapper[4791]: I0218 00:47:13.590009 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a6691e14-8a04-46ac-94cb-cbcfa2894bb8-bundle" (OuterVolumeSpecName: "bundle") pod "a6691e14-8a04-46ac-94cb-cbcfa2894bb8" (UID: "a6691e14-8a04-46ac-94cb-cbcfa2894bb8"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:47:13 crc kubenswrapper[4791]: I0218 00:47:13.591543 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/354c26d0-70dc-44dd-a18c-362d7fa5b5e9-kube-api-access-2qk8f" (OuterVolumeSpecName: "kube-api-access-2qk8f") pod "354c26d0-70dc-44dd-a18c-362d7fa5b5e9" (UID: "354c26d0-70dc-44dd-a18c-362d7fa5b5e9"). InnerVolumeSpecName "kube-api-access-2qk8f". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:47:13 crc kubenswrapper[4791]: I0218 00:47:13.601317 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a6691e14-8a04-46ac-94cb-cbcfa2894bb8-util" (OuterVolumeSpecName: "util") pod "a6691e14-8a04-46ac-94cb-cbcfa2894bb8" (UID: "a6691e14-8a04-46ac-94cb-cbcfa2894bb8"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:47:13 crc kubenswrapper[4791]: I0218 00:47:13.606061 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6691e14-8a04-46ac-94cb-cbcfa2894bb8-kube-api-access-ls7r8" (OuterVolumeSpecName: "kube-api-access-ls7r8") pod "a6691e14-8a04-46ac-94cb-cbcfa2894bb8" (UID: "a6691e14-8a04-46ac-94cb-cbcfa2894bb8"). InnerVolumeSpecName "kube-api-access-ls7r8". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:47:13 crc kubenswrapper[4791]: I0218 00:47:13.688725 4791 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a6691e14-8a04-46ac-94cb-cbcfa2894bb8-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:47:13 crc kubenswrapper[4791]: I0218 00:47:13.688775 4791 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a6691e14-8a04-46ac-94cb-cbcfa2894bb8-util\") on node \"crc\" DevicePath \"\"" Feb 18 00:47:13 crc kubenswrapper[4791]: I0218 00:47:13.688795 4791 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/354c26d0-70dc-44dd-a18c-362d7fa5b5e9-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:47:13 crc kubenswrapper[4791]: I0218 00:47:13.688815 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2qk8f\" (UniqueName: \"kubernetes.io/projected/354c26d0-70dc-44dd-a18c-362d7fa5b5e9-kube-api-access-2qk8f\") on node \"crc\" DevicePath \"\"" Feb 18 00:47:13 crc kubenswrapper[4791]: I0218 00:47:13.688837 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ls7r8\" (UniqueName: \"kubernetes.io/projected/a6691e14-8a04-46ac-94cb-cbcfa2894bb8-kube-api-access-ls7r8\") on node \"crc\" DevicePath \"\"" Feb 18 00:47:13 crc kubenswrapper[4791]: I0218 00:47:13.933957 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/354c26d0-70dc-44dd-a18c-362d7fa5b5e9-util" (OuterVolumeSpecName: "util") pod "354c26d0-70dc-44dd-a18c-362d7fa5b5e9" (UID: "354c26d0-70dc-44dd-a18c-362d7fa5b5e9"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:47:13 crc kubenswrapper[4791]: I0218 00:47:13.992393 4791 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/354c26d0-70dc-44dd-a18c-362d7fa5b5e9-util\") on node \"crc\" DevicePath \"\"" Feb 18 00:47:14 crc kubenswrapper[4791]: I0218 00:47:14.091717 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e089899v296" event={"ID":"354c26d0-70dc-44dd-a18c-362d7fa5b5e9","Type":"ContainerDied","Data":"dd9372672f6e88f0b38048a91953b9a1e687a4cfd4c41cd258d9747873f8449a"} Feb 18 00:47:14 crc kubenswrapper[4791]: I0218 00:47:14.091757 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dd9372672f6e88f0b38048a91953b9a1e687a4cfd4c41cd258d9747873f8449a" Feb 18 00:47:14 crc kubenswrapper[4791]: I0218 00:47:14.091756 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e089899v296" Feb 18 00:47:14 crc kubenswrapper[4791]: I0218 00:47:14.099011 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19cd857" event={"ID":"a6691e14-8a04-46ac-94cb-cbcfa2894bb8","Type":"ContainerDied","Data":"6820d2ef53db76308c03c661cd079bb3937e71922f9fcabbd29d7f5ee755bf95"} Feb 18 00:47:14 crc kubenswrapper[4791]: I0218 00:47:14.099044 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6820d2ef53db76308c03c661cd079bb3937e71922f9fcabbd29d7f5ee755bf95" Feb 18 00:47:14 crc kubenswrapper[4791]: I0218 00:47:14.099100 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19cd857" Feb 18 00:47:15 crc kubenswrapper[4791]: I0218 00:47:15.106716 4791 generic.go:334] "Generic (PLEG): container finished" podID="804281fd-724a-482e-b71e-5b554064df31" containerID="21cd3b1f36e306c76da4cd3b8aaea250d78ed54decd06f20429f7f69fd180011" exitCode=0 Feb 18 00:47:15 crc kubenswrapper[4791]: I0218 00:47:15.106755 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wjrpz" event={"ID":"804281fd-724a-482e-b71e-5b554064df31","Type":"ContainerDied","Data":"21cd3b1f36e306c76da4cd3b8aaea250d78ed54decd06f20429f7f69fd180011"} Feb 18 00:47:16 crc kubenswrapper[4791]: I0218 00:47:16.114133 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wjrpz" event={"ID":"804281fd-724a-482e-b71e-5b554064df31","Type":"ContainerStarted","Data":"da6906119528938141e3e86c8f113e340bbcdd2cec9cd1e858bd2dfbb4f1c655"} Feb 18 00:47:16 crc kubenswrapper[4791]: I0218 00:47:16.134867 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wjrpz" podStartSLOduration=2.480135059 podStartE2EDuration="5.134851499s" podCreationTimestamp="2026-02-18 00:47:11 +0000 UTC" firstStartedPulling="2026-02-18 00:47:13.085800774 +0000 UTC m=+774.653813934" lastFinishedPulling="2026-02-18 00:47:15.740517204 +0000 UTC m=+777.308530374" observedRunningTime="2026-02-18 00:47:16.12904909 +0000 UTC m=+777.697062260" watchObservedRunningTime="2026-02-18 00:47:16.134851499 +0000 UTC m=+777.702864669" Feb 18 00:47:21 crc kubenswrapper[4791]: I0218 00:47:21.972333 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wjrpz" Feb 18 00:47:21 crc kubenswrapper[4791]: I0218 00:47:21.972897 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wjrpz" Feb 18 00:47:22 crc kubenswrapper[4791]: I0218 00:47:22.059198 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wjrpz" Feb 18 00:47:22 crc kubenswrapper[4791]: I0218 00:47:22.237951 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wjrpz" Feb 18 00:47:24 crc kubenswrapper[4791]: I0218 00:47:24.535995 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wjrpz"] Feb 18 00:47:24 crc kubenswrapper[4791]: I0218 00:47:24.536534 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wjrpz" podUID="804281fd-724a-482e-b71e-5b554064df31" containerName="registry-server" containerID="cri-o://da6906119528938141e3e86c8f113e340bbcdd2cec9cd1e858bd2dfbb4f1c655" gracePeriod=2 Feb 18 00:47:24 crc kubenswrapper[4791]: I0218 00:47:24.920421 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wjrpz" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.051080 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/804281fd-724a-482e-b71e-5b554064df31-utilities\") pod \"804281fd-724a-482e-b71e-5b554064df31\" (UID: \"804281fd-724a-482e-b71e-5b554064df31\") " Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.051166 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5tv2l\" (UniqueName: \"kubernetes.io/projected/804281fd-724a-482e-b71e-5b554064df31-kube-api-access-5tv2l\") pod \"804281fd-724a-482e-b71e-5b554064df31\" (UID: \"804281fd-724a-482e-b71e-5b554064df31\") " Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.051208 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/804281fd-724a-482e-b71e-5b554064df31-catalog-content\") pod \"804281fd-724a-482e-b71e-5b554064df31\" (UID: \"804281fd-724a-482e-b71e-5b554064df31\") " Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.051990 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/804281fd-724a-482e-b71e-5b554064df31-utilities" (OuterVolumeSpecName: "utilities") pod "804281fd-724a-482e-b71e-5b554064df31" (UID: "804281fd-724a-482e-b71e-5b554064df31"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.058502 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/804281fd-724a-482e-b71e-5b554064df31-kube-api-access-5tv2l" (OuterVolumeSpecName: "kube-api-access-5tv2l") pod "804281fd-724a-482e-b71e-5b554064df31" (UID: "804281fd-724a-482e-b71e-5b554064df31"). InnerVolumeSpecName "kube-api-access-5tv2l". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.152295 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/804281fd-724a-482e-b71e-5b554064df31-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.152322 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5tv2l\" (UniqueName: \"kubernetes.io/projected/804281fd-724a-482e-b71e-5b554064df31-kube-api-access-5tv2l\") on node \"crc\" DevicePath \"\"" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.168752 4791 generic.go:334] "Generic (PLEG): container finished" podID="804281fd-724a-482e-b71e-5b554064df31" containerID="da6906119528938141e3e86c8f113e340bbcdd2cec9cd1e858bd2dfbb4f1c655" exitCode=0 Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.168810 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wjrpz" event={"ID":"804281fd-724a-482e-b71e-5b554064df31","Type":"ContainerDied","Data":"da6906119528938141e3e86c8f113e340bbcdd2cec9cd1e858bd2dfbb4f1c655"} Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.168891 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wjrpz" event={"ID":"804281fd-724a-482e-b71e-5b554064df31","Type":"ContainerDied","Data":"f8161cdf1be293e14de8062656ee3693d59a7221e3fb5efe28c0cb41b6391243"} Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.168913 4791 scope.go:117] "RemoveContainer" containerID="da6906119528938141e3e86c8f113e340bbcdd2cec9cd1e858bd2dfbb4f1c655" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.168832 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wjrpz" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.184454 4791 scope.go:117] "RemoveContainer" containerID="21cd3b1f36e306c76da4cd3b8aaea250d78ed54decd06f20429f7f69fd180011" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.201418 4791 scope.go:117] "RemoveContainer" containerID="637ce396e9b333951677c2aa49aafaa02b2cb13db12fa2fe30df02cabcdc34e2" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.217373 4791 scope.go:117] "RemoveContainer" containerID="da6906119528938141e3e86c8f113e340bbcdd2cec9cd1e858bd2dfbb4f1c655" Feb 18 00:47:25 crc kubenswrapper[4791]: E0218 00:47:25.217824 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"da6906119528938141e3e86c8f113e340bbcdd2cec9cd1e858bd2dfbb4f1c655\": container with ID starting with da6906119528938141e3e86c8f113e340bbcdd2cec9cd1e858bd2dfbb4f1c655 not found: ID does not exist" containerID="da6906119528938141e3e86c8f113e340bbcdd2cec9cd1e858bd2dfbb4f1c655" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.217866 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"da6906119528938141e3e86c8f113e340bbcdd2cec9cd1e858bd2dfbb4f1c655"} err="failed to get container status \"da6906119528938141e3e86c8f113e340bbcdd2cec9cd1e858bd2dfbb4f1c655\": rpc error: code = NotFound desc = could not find container \"da6906119528938141e3e86c8f113e340bbcdd2cec9cd1e858bd2dfbb4f1c655\": container with ID starting with da6906119528938141e3e86c8f113e340bbcdd2cec9cd1e858bd2dfbb4f1c655 not found: ID does not exist" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.217893 4791 scope.go:117] "RemoveContainer" containerID="21cd3b1f36e306c76da4cd3b8aaea250d78ed54decd06f20429f7f69fd180011" Feb 18 00:47:25 crc kubenswrapper[4791]: E0218 00:47:25.218363 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"21cd3b1f36e306c76da4cd3b8aaea250d78ed54decd06f20429f7f69fd180011\": container with ID starting with 21cd3b1f36e306c76da4cd3b8aaea250d78ed54decd06f20429f7f69fd180011 not found: ID does not exist" containerID="21cd3b1f36e306c76da4cd3b8aaea250d78ed54decd06f20429f7f69fd180011" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.218392 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"21cd3b1f36e306c76da4cd3b8aaea250d78ed54decd06f20429f7f69fd180011"} err="failed to get container status \"21cd3b1f36e306c76da4cd3b8aaea250d78ed54decd06f20429f7f69fd180011\": rpc error: code = NotFound desc = could not find container \"21cd3b1f36e306c76da4cd3b8aaea250d78ed54decd06f20429f7f69fd180011\": container with ID starting with 21cd3b1f36e306c76da4cd3b8aaea250d78ed54decd06f20429f7f69fd180011 not found: ID does not exist" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.218410 4791 scope.go:117] "RemoveContainer" containerID="637ce396e9b333951677c2aa49aafaa02b2cb13db12fa2fe30df02cabcdc34e2" Feb 18 00:47:25 crc kubenswrapper[4791]: E0218 00:47:25.218697 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"637ce396e9b333951677c2aa49aafaa02b2cb13db12fa2fe30df02cabcdc34e2\": container with ID starting with 637ce396e9b333951677c2aa49aafaa02b2cb13db12fa2fe30df02cabcdc34e2 not found: ID does not exist" containerID="637ce396e9b333951677c2aa49aafaa02b2cb13db12fa2fe30df02cabcdc34e2" 
Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.218723 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"637ce396e9b333951677c2aa49aafaa02b2cb13db12fa2fe30df02cabcdc34e2"} err="failed to get container status \"637ce396e9b333951677c2aa49aafaa02b2cb13db12fa2fe30df02cabcdc34e2\": rpc error: code = NotFound desc = could not find container \"637ce396e9b333951677c2aa49aafaa02b2cb13db12fa2fe30df02cabcdc34e2\": container with ID starting with 637ce396e9b333951677c2aa49aafaa02b2cb13db12fa2fe30df02cabcdc34e2 not found: ID does not exist" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.427995 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-fb6df79fb-5x4qj"] Feb 18 00:47:25 crc kubenswrapper[4791]: E0218 00:47:25.428380 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="354c26d0-70dc-44dd-a18c-362d7fa5b5e9" containerName="extract" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.428409 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="354c26d0-70dc-44dd-a18c-362d7fa5b5e9" containerName="extract" Feb 18 00:47:25 crc kubenswrapper[4791]: E0218 00:47:25.428439 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6691e14-8a04-46ac-94cb-cbcfa2894bb8" containerName="pull" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.428452 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6691e14-8a04-46ac-94cb-cbcfa2894bb8" containerName="pull" Feb 18 00:47:25 crc kubenswrapper[4791]: E0218 00:47:25.428469 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6691e14-8a04-46ac-94cb-cbcfa2894bb8" containerName="extract" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.428481 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6691e14-8a04-46ac-94cb-cbcfa2894bb8" containerName="extract" Feb 18 00:47:25 crc kubenswrapper[4791]: E0218 00:47:25.428502 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="804281fd-724a-482e-b71e-5b554064df31" containerName="extract-content" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.428516 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="804281fd-724a-482e-b71e-5b554064df31" containerName="extract-content" Feb 18 00:47:25 crc kubenswrapper[4791]: E0218 00:47:25.428533 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="804281fd-724a-482e-b71e-5b554064df31" containerName="registry-server" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.428546 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="804281fd-724a-482e-b71e-5b554064df31" containerName="registry-server" Feb 18 00:47:25 crc kubenswrapper[4791]: E0218 00:47:25.428570 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="804281fd-724a-482e-b71e-5b554064df31" containerName="extract-utilities" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.428582 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="804281fd-724a-482e-b71e-5b554064df31" containerName="extract-utilities" Feb 18 00:47:25 crc kubenswrapper[4791]: E0218 00:47:25.428601 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="354c26d0-70dc-44dd-a18c-362d7fa5b5e9" containerName="pull" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.428613 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="354c26d0-70dc-44dd-a18c-362d7fa5b5e9" containerName="pull" Feb 18 00:47:25 crc kubenswrapper[4791]: E0218 
00:47:25.428638 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6691e14-8a04-46ac-94cb-cbcfa2894bb8" containerName="util" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.428650 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6691e14-8a04-46ac-94cb-cbcfa2894bb8" containerName="util" Feb 18 00:47:25 crc kubenswrapper[4791]: E0218 00:47:25.428672 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="354c26d0-70dc-44dd-a18c-362d7fa5b5e9" containerName="util" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.428685 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="354c26d0-70dc-44dd-a18c-362d7fa5b5e9" containerName="util" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.428913 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="804281fd-724a-482e-b71e-5b554064df31" containerName="registry-server" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.428931 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6691e14-8a04-46ac-94cb-cbcfa2894bb8" containerName="extract" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.428963 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="354c26d0-70dc-44dd-a18c-362d7fa5b5e9" containerName="extract" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.430040 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators-redhat/loki-operator-controller-manager-fb6df79fb-5x4qj" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.437053 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-controller-manager-service-cert" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.437327 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"openshift-service-ca.crt" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.437433 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-controller-manager-dockercfg-5snjg" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.440827 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"loki-operator-manager-config" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.443571 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"kube-root-ca.crt" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.443771 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-metrics" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.460468 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-fb6df79fb-5x4qj"] Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.559869 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/ea096b74-bf68-4755-8827-cbcd680241c9-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-fb6df79fb-5x4qj\" (UID: \"ea096b74-bf68-4755-8827-cbcd680241c9\") " pod="openshift-operators-redhat/loki-operator-controller-manager-fb6df79fb-5x4qj" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.559932 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-4sp4z\" (UniqueName: \"kubernetes.io/projected/ea096b74-bf68-4755-8827-cbcd680241c9-kube-api-access-4sp4z\") pod \"loki-operator-controller-manager-fb6df79fb-5x4qj\" (UID: \"ea096b74-bf68-4755-8827-cbcd680241c9\") " pod="openshift-operators-redhat/loki-operator-controller-manager-fb6df79fb-5x4qj" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.559969 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ea096b74-bf68-4755-8827-cbcd680241c9-apiservice-cert\") pod \"loki-operator-controller-manager-fb6df79fb-5x4qj\" (UID: \"ea096b74-bf68-4755-8827-cbcd680241c9\") " pod="openshift-operators-redhat/loki-operator-controller-manager-fb6df79fb-5x4qj" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.560112 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ea096b74-bf68-4755-8827-cbcd680241c9-webhook-cert\") pod \"loki-operator-controller-manager-fb6df79fb-5x4qj\" (UID: \"ea096b74-bf68-4755-8827-cbcd680241c9\") " pod="openshift-operators-redhat/loki-operator-controller-manager-fb6df79fb-5x4qj" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.560303 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/ea096b74-bf68-4755-8827-cbcd680241c9-manager-config\") pod \"loki-operator-controller-manager-fb6df79fb-5x4qj\" (UID: \"ea096b74-bf68-4755-8827-cbcd680241c9\") " pod="openshift-operators-redhat/loki-operator-controller-manager-fb6df79fb-5x4qj" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.661413 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/ea096b74-bf68-4755-8827-cbcd680241c9-manager-config\") pod \"loki-operator-controller-manager-fb6df79fb-5x4qj\" (UID: \"ea096b74-bf68-4755-8827-cbcd680241c9\") " pod="openshift-operators-redhat/loki-operator-controller-manager-fb6df79fb-5x4qj" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.661495 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/ea096b74-bf68-4755-8827-cbcd680241c9-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-fb6df79fb-5x4qj\" (UID: \"ea096b74-bf68-4755-8827-cbcd680241c9\") " pod="openshift-operators-redhat/loki-operator-controller-manager-fb6df79fb-5x4qj" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.661525 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4sp4z\" (UniqueName: \"kubernetes.io/projected/ea096b74-bf68-4755-8827-cbcd680241c9-kube-api-access-4sp4z\") pod \"loki-operator-controller-manager-fb6df79fb-5x4qj\" (UID: \"ea096b74-bf68-4755-8827-cbcd680241c9\") " pod="openshift-operators-redhat/loki-operator-controller-manager-fb6df79fb-5x4qj" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.661548 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ea096b74-bf68-4755-8827-cbcd680241c9-apiservice-cert\") pod \"loki-operator-controller-manager-fb6df79fb-5x4qj\" (UID: \"ea096b74-bf68-4755-8827-cbcd680241c9\") " pod="openshift-operators-redhat/loki-operator-controller-manager-fb6df79fb-5x4qj" Feb 18 00:47:25 crc 
kubenswrapper[4791]: I0218 00:47:25.661573 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ea096b74-bf68-4755-8827-cbcd680241c9-webhook-cert\") pod \"loki-operator-controller-manager-fb6df79fb-5x4qj\" (UID: \"ea096b74-bf68-4755-8827-cbcd680241c9\") " pod="openshift-operators-redhat/loki-operator-controller-manager-fb6df79fb-5x4qj" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.662645 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/ea096b74-bf68-4755-8827-cbcd680241c9-manager-config\") pod \"loki-operator-controller-manager-fb6df79fb-5x4qj\" (UID: \"ea096b74-bf68-4755-8827-cbcd680241c9\") " pod="openshift-operators-redhat/loki-operator-controller-manager-fb6df79fb-5x4qj" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.665172 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ea096b74-bf68-4755-8827-cbcd680241c9-apiservice-cert\") pod \"loki-operator-controller-manager-fb6df79fb-5x4qj\" (UID: \"ea096b74-bf68-4755-8827-cbcd680241c9\") " pod="openshift-operators-redhat/loki-operator-controller-manager-fb6df79fb-5x4qj" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.665412 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/ea096b74-bf68-4755-8827-cbcd680241c9-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-fb6df79fb-5x4qj\" (UID: \"ea096b74-bf68-4755-8827-cbcd680241c9\") " pod="openshift-operators-redhat/loki-operator-controller-manager-fb6df79fb-5x4qj" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.667639 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ea096b74-bf68-4755-8827-cbcd680241c9-webhook-cert\") pod \"loki-operator-controller-manager-fb6df79fb-5x4qj\" (UID: \"ea096b74-bf68-4755-8827-cbcd680241c9\") " pod="openshift-operators-redhat/loki-operator-controller-manager-fb6df79fb-5x4qj" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.681200 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4sp4z\" (UniqueName: \"kubernetes.io/projected/ea096b74-bf68-4755-8827-cbcd680241c9-kube-api-access-4sp4z\") pod \"loki-operator-controller-manager-fb6df79fb-5x4qj\" (UID: \"ea096b74-bf68-4755-8827-cbcd680241c9\") " pod="openshift-operators-redhat/loki-operator-controller-manager-fb6df79fb-5x4qj" Feb 18 00:47:25 crc kubenswrapper[4791]: I0218 00:47:25.746389 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators-redhat/loki-operator-controller-manager-fb6df79fb-5x4qj" Feb 18 00:47:26 crc kubenswrapper[4791]: I0218 00:47:26.131895 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-fb6df79fb-5x4qj"] Feb 18 00:47:26 crc kubenswrapper[4791]: I0218 00:47:26.175145 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-fb6df79fb-5x4qj" event={"ID":"ea096b74-bf68-4755-8827-cbcd680241c9","Type":"ContainerStarted","Data":"cd050532f14eecfdf73737f1fec8252af7fcbc9da4c84b8be7cf07d329113fcf"} Feb 18 00:47:26 crc kubenswrapper[4791]: I0218 00:47:26.383514 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/804281fd-724a-482e-b71e-5b554064df31-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "804281fd-724a-482e-b71e-5b554064df31" (UID: "804281fd-724a-482e-b71e-5b554064df31"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:47:26 crc kubenswrapper[4791]: I0218 00:47:26.472269 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/804281fd-724a-482e-b71e-5b554064df31-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 00:47:26 crc kubenswrapper[4791]: I0218 00:47:26.696177 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wjrpz"] Feb 18 00:47:26 crc kubenswrapper[4791]: I0218 00:47:26.702802 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wjrpz"] Feb 18 00:47:26 crc kubenswrapper[4791]: I0218 00:47:26.800202 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 00:47:26 crc kubenswrapper[4791]: I0218 00:47:26.800269 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 00:47:26 crc kubenswrapper[4791]: I0218 00:47:26.848368 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/cluster-logging-operator-c769fd969-plvzd"] Feb 18 00:47:26 crc kubenswrapper[4791]: I0218 00:47:26.850929 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/cluster-logging-operator-c769fd969-plvzd" Feb 18 00:47:26 crc kubenswrapper[4791]: I0218 00:47:26.854321 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"cluster-logging-operator-dockercfg-g4fzt" Feb 18 00:47:26 crc kubenswrapper[4791]: I0218 00:47:26.856130 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"openshift-service-ca.crt" Feb 18 00:47:26 crc kubenswrapper[4791]: I0218 00:47:26.861713 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"kube-root-ca.crt" Feb 18 00:47:26 crc kubenswrapper[4791]: I0218 00:47:26.864888 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/cluster-logging-operator-c769fd969-plvzd"] Feb 18 00:47:26 crc kubenswrapper[4791]: I0218 00:47:26.901341 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zrbwc\" (UniqueName: \"kubernetes.io/projected/ae298045-e94b-47f8-8404-e29637b418ce-kube-api-access-zrbwc\") pod \"cluster-logging-operator-c769fd969-plvzd\" (UID: \"ae298045-e94b-47f8-8404-e29637b418ce\") " pod="openshift-logging/cluster-logging-operator-c769fd969-plvzd" Feb 18 00:47:27 crc kubenswrapper[4791]: I0218 00:47:27.002766 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zrbwc\" (UniqueName: \"kubernetes.io/projected/ae298045-e94b-47f8-8404-e29637b418ce-kube-api-access-zrbwc\") pod \"cluster-logging-operator-c769fd969-plvzd\" (UID: \"ae298045-e94b-47f8-8404-e29637b418ce\") " pod="openshift-logging/cluster-logging-operator-c769fd969-plvzd" Feb 18 00:47:27 crc kubenswrapper[4791]: I0218 00:47:27.021481 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zrbwc\" (UniqueName: \"kubernetes.io/projected/ae298045-e94b-47f8-8404-e29637b418ce-kube-api-access-zrbwc\") pod \"cluster-logging-operator-c769fd969-plvzd\" (UID: \"ae298045-e94b-47f8-8404-e29637b418ce\") " pod="openshift-logging/cluster-logging-operator-c769fd969-plvzd" Feb 18 00:47:27 crc kubenswrapper[4791]: I0218 00:47:27.070040 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="804281fd-724a-482e-b71e-5b554064df31" path="/var/lib/kubelet/pods/804281fd-724a-482e-b71e-5b554064df31/volumes" Feb 18 00:47:27 crc kubenswrapper[4791]: I0218 00:47:27.207596 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/cluster-logging-operator-c769fd969-plvzd" Feb 18 00:47:27 crc kubenswrapper[4791]: I0218 00:47:27.550798 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/cluster-logging-operator-c769fd969-plvzd"] Feb 18 00:47:28 crc kubenswrapper[4791]: I0218 00:47:28.188738 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/cluster-logging-operator-c769fd969-plvzd" event={"ID":"ae298045-e94b-47f8-8404-e29637b418ce","Type":"ContainerStarted","Data":"e57126958b760c57e729c513f6ecbe23599d3d4a36fdb4ee42181dda21545332"} Feb 18 00:47:31 crc kubenswrapper[4791]: I0218 00:47:31.209103 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-fb6df79fb-5x4qj" event={"ID":"ea096b74-bf68-4755-8827-cbcd680241c9","Type":"ContainerStarted","Data":"d687db124b47632b2afa7ecc54e3b6e1c5ec30b0220c5e53c80f7bc764ed8420"} Feb 18 00:47:35 crc kubenswrapper[4791]: I0218 00:47:35.233083 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/cluster-logging-operator-c769fd969-plvzd" event={"ID":"ae298045-e94b-47f8-8404-e29637b418ce","Type":"ContainerStarted","Data":"a9da56caf7be88d50c5ecf219731a4db72d876c0098f173d34278aa6815edc3e"} Feb 18 00:47:35 crc kubenswrapper[4791]: I0218 00:47:35.246817 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/cluster-logging-operator-c769fd969-plvzd" podStartSLOduration=2.434779325 podStartE2EDuration="9.246801811s" podCreationTimestamp="2026-02-18 00:47:26 +0000 UTC" firstStartedPulling="2026-02-18 00:47:27.570191381 +0000 UTC m=+789.138204551" lastFinishedPulling="2026-02-18 00:47:34.382213867 +0000 UTC m=+795.950227037" observedRunningTime="2026-02-18 00:47:35.244569563 +0000 UTC m=+796.812582733" watchObservedRunningTime="2026-02-18 00:47:35.246801811 +0000 UTC m=+796.814814981" Feb 18 00:47:40 crc kubenswrapper[4791]: I0218 00:47:40.267581 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-fb6df79fb-5x4qj" event={"ID":"ea096b74-bf68-4755-8827-cbcd680241c9","Type":"ContainerStarted","Data":"fd4974022c7c2948ea41199f0f9f7c55725168ca3b1d4191951b0fa4edc3abff"} Feb 18 00:47:40 crc kubenswrapper[4791]: I0218 00:47:40.268147 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators-redhat/loki-operator-controller-manager-fb6df79fb-5x4qj" Feb 18 00:47:40 crc kubenswrapper[4791]: I0218 00:47:40.272385 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators-redhat/loki-operator-controller-manager-fb6df79fb-5x4qj" Feb 18 00:47:40 crc kubenswrapper[4791]: I0218 00:47:40.294873 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators-redhat/loki-operator-controller-manager-fb6df79fb-5x4qj" podStartSLOduration=1.965092645 podStartE2EDuration="15.294849306s" podCreationTimestamp="2026-02-18 00:47:25 +0000 UTC" firstStartedPulling="2026-02-18 00:47:26.143214644 +0000 UTC m=+787.711227814" lastFinishedPulling="2026-02-18 00:47:39.472971305 +0000 UTC m=+801.040984475" observedRunningTime="2026-02-18 00:47:40.288586483 +0000 UTC m=+801.856599693" watchObservedRunningTime="2026-02-18 00:47:40.294849306 +0000 UTC m=+801.862862506" Feb 18 00:47:43 crc kubenswrapper[4791]: I0218 00:47:43.856820 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["minio-dev/minio"] Feb 18 00:47:43 crc 
kubenswrapper[4791]: I0218 00:47:43.858296 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="minio-dev/minio" Feb 18 00:47:43 crc kubenswrapper[4791]: I0218 00:47:43.860942 4791 reflector.go:368] Caches populated for *v1.Secret from object-"minio-dev"/"default-dockercfg-x5njv" Feb 18 00:47:43 crc kubenswrapper[4791]: I0218 00:47:43.861031 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"minio-dev"/"kube-root-ca.crt" Feb 18 00:47:43 crc kubenswrapper[4791]: I0218 00:47:43.866362 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"minio-dev"/"openshift-service-ca.crt" Feb 18 00:47:43 crc kubenswrapper[4791]: I0218 00:47:43.875918 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["minio-dev/minio"] Feb 18 00:47:43 crc kubenswrapper[4791]: I0218 00:47:43.965909 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-68055992-e00c-48e7-94bb-2c66e9c854ca\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-68055992-e00c-48e7-94bb-2c66e9c854ca\") pod \"minio\" (UID: \"d4152f46-0160-4300-922a-16220a7732e4\") " pod="minio-dev/minio" Feb 18 00:47:43 crc kubenswrapper[4791]: I0218 00:47:43.966209 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-spptz\" (UniqueName: \"kubernetes.io/projected/d4152f46-0160-4300-922a-16220a7732e4-kube-api-access-spptz\") pod \"minio\" (UID: \"d4152f46-0160-4300-922a-16220a7732e4\") " pod="minio-dev/minio" Feb 18 00:47:44 crc kubenswrapper[4791]: I0218 00:47:44.067302 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-68055992-e00c-48e7-94bb-2c66e9c854ca\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-68055992-e00c-48e7-94bb-2c66e9c854ca\") pod \"minio\" (UID: \"d4152f46-0160-4300-922a-16220a7732e4\") " pod="minio-dev/minio" Feb 18 00:47:44 crc kubenswrapper[4791]: I0218 00:47:44.067344 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-spptz\" (UniqueName: \"kubernetes.io/projected/d4152f46-0160-4300-922a-16220a7732e4-kube-api-access-spptz\") pod \"minio\" (UID: \"d4152f46-0160-4300-922a-16220a7732e4\") " pod="minio-dev/minio" Feb 18 00:47:44 crc kubenswrapper[4791]: I0218 00:47:44.071045 4791 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Feb 18 00:47:44 crc kubenswrapper[4791]: I0218 00:47:44.071190 4791 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-68055992-e00c-48e7-94bb-2c66e9c854ca\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-68055992-e00c-48e7-94bb-2c66e9c854ca\") pod \"minio\" (UID: \"d4152f46-0160-4300-922a-16220a7732e4\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/c2bee690962af622638d76a7c96e123bdeaafca56bdb0138544209c928eab385/globalmount\"" pod="minio-dev/minio" Feb 18 00:47:44 crc kubenswrapper[4791]: I0218 00:47:44.085518 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-spptz\" (UniqueName: \"kubernetes.io/projected/d4152f46-0160-4300-922a-16220a7732e4-kube-api-access-spptz\") pod \"minio\" (UID: \"d4152f46-0160-4300-922a-16220a7732e4\") " pod="minio-dev/minio" Feb 18 00:47:44 crc kubenswrapper[4791]: I0218 00:47:44.100074 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-68055992-e00c-48e7-94bb-2c66e9c854ca\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-68055992-e00c-48e7-94bb-2c66e9c854ca\") pod \"minio\" (UID: \"d4152f46-0160-4300-922a-16220a7732e4\") " pod="minio-dev/minio" Feb 18 00:47:44 crc kubenswrapper[4791]: I0218 00:47:44.189360 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="minio-dev/minio" Feb 18 00:47:44 crc kubenswrapper[4791]: I0218 00:47:44.637018 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["minio-dev/minio"] Feb 18 00:47:45 crc kubenswrapper[4791]: I0218 00:47:45.307135 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="minio-dev/minio" event={"ID":"d4152f46-0160-4300-922a-16220a7732e4","Type":"ContainerStarted","Data":"d517c5444f100e3925906ddb183dee1888b3ee9656939e3d4952a94924767b4e"} Feb 18 00:47:48 crc kubenswrapper[4791]: I0218 00:47:48.327752 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="minio-dev/minio" event={"ID":"d4152f46-0160-4300-922a-16220a7732e4","Type":"ContainerStarted","Data":"3822b91fb7ae7a981331c49dc335142f11f5822170fcec6c36b1fcc0e1f47322"} Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.008303 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="minio-dev/minio" podStartSLOduration=9.307326112 podStartE2EDuration="12.008283865s" podCreationTimestamp="2026-02-18 00:47:41 +0000 UTC" firstStartedPulling="2026-02-18 00:47:44.659309918 +0000 UTC m=+806.227323088" lastFinishedPulling="2026-02-18 00:47:47.360267671 +0000 UTC m=+808.928280841" observedRunningTime="2026-02-18 00:47:48.34385616 +0000 UTC m=+809.911869390" watchObservedRunningTime="2026-02-18 00:47:53.008283865 +0000 UTC m=+814.576297035" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.010762 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-distributor-5d5548c9f5-t9t9m"] Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.011716 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-distributor-5d5548c9f5-t9t9m" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.017057 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-ca-bundle" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.017095 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-config" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.017153 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-dockercfg-bhfj4" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.017354 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-distributor-http" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.017234 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-distributor-grpc" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.033838 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-distributor-5d5548c9f5-t9t9m"] Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.144043 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-querier-76bf7b6d45-mhn8q"] Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.144844 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-querier-76bf7b6d45-mhn8q" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.146726 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-querier-http" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.147095 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-s3" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.147434 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-querier-grpc" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.202977 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/181d182c-32fc-497d-b84b-a127338caae4-config\") pod \"logging-loki-distributor-5d5548c9f5-t9t9m\" (UID: \"181d182c-32fc-497d-b84b-a127338caae4\") " pod="openshift-logging/logging-loki-distributor-5d5548c9f5-t9t9m" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.203035 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/181d182c-32fc-497d-b84b-a127338caae4-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-5d5548c9f5-t9t9m\" (UID: \"181d182c-32fc-497d-b84b-a127338caae4\") " pod="openshift-logging/logging-loki-distributor-5d5548c9f5-t9t9m" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.203077 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v4d5p\" (UniqueName: \"kubernetes.io/projected/181d182c-32fc-497d-b84b-a127338caae4-kube-api-access-v4d5p\") pod \"logging-loki-distributor-5d5548c9f5-t9t9m\" (UID: \"181d182c-32fc-497d-b84b-a127338caae4\") " pod="openshift-logging/logging-loki-distributor-5d5548c9f5-t9t9m" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.203113 4791 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/181d182c-32fc-497d-b84b-a127338caae4-logging-loki-ca-bundle\") pod \"logging-loki-distributor-5d5548c9f5-t9t9m\" (UID: \"181d182c-32fc-497d-b84b-a127338caae4\") " pod="openshift-logging/logging-loki-distributor-5d5548c9f5-t9t9m" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.203167 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/181d182c-32fc-497d-b84b-a127338caae4-logging-loki-distributor-http\") pod \"logging-loki-distributor-5d5548c9f5-t9t9m\" (UID: \"181d182c-32fc-497d-b84b-a127338caae4\") " pod="openshift-logging/logging-loki-distributor-5d5548c9f5-t9t9m" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.202985 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-querier-76bf7b6d45-mhn8q"] Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.225361 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-query-frontend-6d6859c548-qlq5c"] Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.226632 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-query-frontend-6d6859c548-qlq5c" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.231241 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-query-frontend-grpc" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.231671 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-query-frontend-http" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.245123 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-query-frontend-6d6859c548-qlq5c"] Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.304715 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/82108d8c-ce91-4d2f-ab31-59fa33cb1813-logging-loki-querier-http\") pod \"logging-loki-querier-76bf7b6d45-mhn8q\" (UID: \"82108d8c-ce91-4d2f-ab31-59fa33cb1813\") " pod="openshift-logging/logging-loki-querier-76bf7b6d45-mhn8q" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.305037 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/82108d8c-ce91-4d2f-ab31-59fa33cb1813-logging-loki-s3\") pod \"logging-loki-querier-76bf7b6d45-mhn8q\" (UID: \"82108d8c-ce91-4d2f-ab31-59fa33cb1813\") " pod="openshift-logging/logging-loki-querier-76bf7b6d45-mhn8q" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.305657 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/82108d8c-ce91-4d2f-ab31-59fa33cb1813-logging-loki-ca-bundle\") pod \"logging-loki-querier-76bf7b6d45-mhn8q\" (UID: \"82108d8c-ce91-4d2f-ab31-59fa33cb1813\") " pod="openshift-logging/logging-loki-querier-76bf7b6d45-mhn8q" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.305786 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/181d182c-32fc-497d-b84b-a127338caae4-config\") pod 
\"logging-loki-distributor-5d5548c9f5-t9t9m\" (UID: \"181d182c-32fc-497d-b84b-a127338caae4\") " pod="openshift-logging/logging-loki-distributor-5d5548c9f5-t9t9m" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.305891 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/181d182c-32fc-497d-b84b-a127338caae4-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-5d5548c9f5-t9t9m\" (UID: \"181d182c-32fc-497d-b84b-a127338caae4\") " pod="openshift-logging/logging-loki-distributor-5d5548c9f5-t9t9m" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.306802 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v4d5p\" (UniqueName: \"kubernetes.io/projected/181d182c-32fc-497d-b84b-a127338caae4-kube-api-access-v4d5p\") pod \"logging-loki-distributor-5d5548c9f5-t9t9m\" (UID: \"181d182c-32fc-497d-b84b-a127338caae4\") " pod="openshift-logging/logging-loki-distributor-5d5548c9f5-t9t9m" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.306900 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w8fvg\" (UniqueName: \"kubernetes.io/projected/82108d8c-ce91-4d2f-ab31-59fa33cb1813-kube-api-access-w8fvg\") pod \"logging-loki-querier-76bf7b6d45-mhn8q\" (UID: \"82108d8c-ce91-4d2f-ab31-59fa33cb1813\") " pod="openshift-logging/logging-loki-querier-76bf7b6d45-mhn8q" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.306971 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/181d182c-32fc-497d-b84b-a127338caae4-logging-loki-ca-bundle\") pod \"logging-loki-distributor-5d5548c9f5-t9t9m\" (UID: \"181d182c-32fc-497d-b84b-a127338caae4\") " pod="openshift-logging/logging-loki-distributor-5d5548c9f5-t9t9m" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.307056 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/181d182c-32fc-497d-b84b-a127338caae4-logging-loki-distributor-http\") pod \"logging-loki-distributor-5d5548c9f5-t9t9m\" (UID: \"181d182c-32fc-497d-b84b-a127338caae4\") " pod="openshift-logging/logging-loki-distributor-5d5548c9f5-t9t9m" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.307125 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/181d182c-32fc-497d-b84b-a127338caae4-config\") pod \"logging-loki-distributor-5d5548c9f5-t9t9m\" (UID: \"181d182c-32fc-497d-b84b-a127338caae4\") " pod="openshift-logging/logging-loki-distributor-5d5548c9f5-t9t9m" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.307140 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/82108d8c-ce91-4d2f-ab31-59fa33cb1813-logging-loki-querier-grpc\") pod \"logging-loki-querier-76bf7b6d45-mhn8q\" (UID: \"82108d8c-ce91-4d2f-ab31-59fa33cb1813\") " pod="openshift-logging/logging-loki-querier-76bf7b6d45-mhn8q" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.307289 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82108d8c-ce91-4d2f-ab31-59fa33cb1813-config\") pod \"logging-loki-querier-76bf7b6d45-mhn8q\" (UID: 
\"82108d8c-ce91-4d2f-ab31-59fa33cb1813\") " pod="openshift-logging/logging-loki-querier-76bf7b6d45-mhn8q" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.307646 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/181d182c-32fc-497d-b84b-a127338caae4-logging-loki-ca-bundle\") pod \"logging-loki-distributor-5d5548c9f5-t9t9m\" (UID: \"181d182c-32fc-497d-b84b-a127338caae4\") " pod="openshift-logging/logging-loki-distributor-5d5548c9f5-t9t9m" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.311908 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/181d182c-32fc-497d-b84b-a127338caae4-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-5d5548c9f5-t9t9m\" (UID: \"181d182c-32fc-497d-b84b-a127338caae4\") " pod="openshift-logging/logging-loki-distributor-5d5548c9f5-t9t9m" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.313422 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/181d182c-32fc-497d-b84b-a127338caae4-logging-loki-distributor-http\") pod \"logging-loki-distributor-5d5548c9f5-t9t9m\" (UID: \"181d182c-32fc-497d-b84b-a127338caae4\") " pod="openshift-logging/logging-loki-distributor-5d5548c9f5-t9t9m" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.325680 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v4d5p\" (UniqueName: \"kubernetes.io/projected/181d182c-32fc-497d-b84b-a127338caae4-kube-api-access-v4d5p\") pod \"logging-loki-distributor-5d5548c9f5-t9t9m\" (UID: \"181d182c-32fc-497d-b84b-a127338caae4\") " pod="openshift-logging/logging-loki-distributor-5d5548c9f5-t9t9m" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.334483 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-distributor-5d5548c9f5-t9t9m" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.338452 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-gateway-f46df4b65-hbtdt"] Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.344492 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-gateway-f46df4b65-hbtdt" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.348375 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-dockercfg-jxptn" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.349893 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-client-http" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.350456 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-http" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.350661 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-gateway" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.350677 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.351148 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-gateway-ca-bundle" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.352860 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-f46df4b65-hbtdt"] Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.365302 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-gateway-f46df4b65-plgfh"] Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.366284 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-f46df4b65-plgfh" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.378916 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-f46df4b65-plgfh"] Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.408577 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/53934528-3cf8-46d2-856a-1d1deef2bd01-lokistack-gateway\") pod \"logging-loki-gateway-f46df4b65-plgfh\" (UID: \"53934528-3cf8-46d2-856a-1d1deef2bd01\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-plgfh" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.408869 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/82108d8c-ce91-4d2f-ab31-59fa33cb1813-logging-loki-ca-bundle\") pod \"logging-loki-querier-76bf7b6d45-mhn8q\" (UID: \"82108d8c-ce91-4d2f-ab31-59fa33cb1813\") " pod="openshift-logging/logging-loki-querier-76bf7b6d45-mhn8q" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.408914 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/49f1d891-d5f2-4c56-af09-542c600ddb15-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-f46df4b65-hbtdt\" (UID: \"49f1d891-d5f2-4c56-af09-542c600ddb15\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-hbtdt" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.408948 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/53934528-3cf8-46d2-856a-1d1deef2bd01-tls-secret\") pod \"logging-loki-gateway-f46df4b65-plgfh\" 
(UID: \"53934528-3cf8-46d2-856a-1d1deef2bd01\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-plgfh" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.408977 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/53934528-3cf8-46d2-856a-1d1deef2bd01-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-f46df4b65-plgfh\" (UID: \"53934528-3cf8-46d2-856a-1d1deef2bd01\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-plgfh" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.409005 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49f1d891-d5f2-4c56-af09-542c600ddb15-logging-loki-ca-bundle\") pod \"logging-loki-gateway-f46df4b65-hbtdt\" (UID: \"49f1d891-d5f2-4c56-af09-542c600ddb15\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-hbtdt" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.409031 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w8fvg\" (UniqueName: \"kubernetes.io/projected/82108d8c-ce91-4d2f-ab31-59fa33cb1813-kube-api-access-w8fvg\") pod \"logging-loki-querier-76bf7b6d45-mhn8q\" (UID: \"82108d8c-ce91-4d2f-ab31-59fa33cb1813\") " pod="openshift-logging/logging-loki-querier-76bf7b6d45-mhn8q" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.409050 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dkjw2\" (UniqueName: \"kubernetes.io/projected/49f1d891-d5f2-4c56-af09-542c600ddb15-kube-api-access-dkjw2\") pod \"logging-loki-gateway-f46df4b65-hbtdt\" (UID: \"49f1d891-d5f2-4c56-af09-542c600ddb15\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-hbtdt" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.409067 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33a1ba78-c845-421b-9404-1f56402fc29a-config\") pod \"logging-loki-query-frontend-6d6859c548-qlq5c\" (UID: \"33a1ba78-c845-421b-9404-1f56402fc29a\") " pod="openshift-logging/logging-loki-query-frontend-6d6859c548-qlq5c" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.409085 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/53934528-3cf8-46d2-856a-1d1deef2bd01-rbac\") pod \"logging-loki-gateway-f46df4b65-plgfh\" (UID: \"53934528-3cf8-46d2-856a-1d1deef2bd01\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-plgfh" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.409120 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/53934528-3cf8-46d2-856a-1d1deef2bd01-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-f46df4b65-plgfh\" (UID: \"53934528-3cf8-46d2-856a-1d1deef2bd01\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-plgfh" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.409612 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49f1d891-d5f2-4c56-af09-542c600ddb15-logging-loki-gateway-ca-bundle\") pod 
\"logging-loki-gateway-f46df4b65-hbtdt\" (UID: \"49f1d891-d5f2-4c56-af09-542c600ddb15\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-hbtdt" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.409665 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/53934528-3cf8-46d2-856a-1d1deef2bd01-logging-loki-ca-bundle\") pod \"logging-loki-gateway-f46df4b65-plgfh\" (UID: \"53934528-3cf8-46d2-856a-1d1deef2bd01\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-plgfh" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.409700 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/82108d8c-ce91-4d2f-ab31-59fa33cb1813-logging-loki-querier-grpc\") pod \"logging-loki-querier-76bf7b6d45-mhn8q\" (UID: \"82108d8c-ce91-4d2f-ab31-59fa33cb1813\") " pod="openshift-logging/logging-loki-querier-76bf7b6d45-mhn8q" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.409748 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r8jw2\" (UniqueName: \"kubernetes.io/projected/53934528-3cf8-46d2-856a-1d1deef2bd01-kube-api-access-r8jw2\") pod \"logging-loki-gateway-f46df4b65-plgfh\" (UID: \"53934528-3cf8-46d2-856a-1d1deef2bd01\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-plgfh" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.409782 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82108d8c-ce91-4d2f-ab31-59fa33cb1813-config\") pod \"logging-loki-querier-76bf7b6d45-mhn8q\" (UID: \"82108d8c-ce91-4d2f-ab31-59fa33cb1813\") " pod="openshift-logging/logging-loki-querier-76bf7b6d45-mhn8q" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.410329 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/33a1ba78-c845-421b-9404-1f56402fc29a-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-6d6859c548-qlq5c\" (UID: \"33a1ba78-c845-421b-9404-1f56402fc29a\") " pod="openshift-logging/logging-loki-query-frontend-6d6859c548-qlq5c" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.410398 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/49f1d891-d5f2-4c56-af09-542c600ddb15-tenants\") pod \"logging-loki-gateway-f46df4b65-hbtdt\" (UID: \"49f1d891-d5f2-4c56-af09-542c600ddb15\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-hbtdt" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.410434 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/82108d8c-ce91-4d2f-ab31-59fa33cb1813-logging-loki-querier-http\") pod \"logging-loki-querier-76bf7b6d45-mhn8q\" (UID: \"82108d8c-ce91-4d2f-ab31-59fa33cb1813\") " pod="openshift-logging/logging-loki-querier-76bf7b6d45-mhn8q" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.410461 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/49f1d891-d5f2-4c56-af09-542c600ddb15-tls-secret\") pod \"logging-loki-gateway-f46df4b65-hbtdt\" (UID: 
\"49f1d891-d5f2-4c56-af09-542c600ddb15\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-hbtdt" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.410491 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/82108d8c-ce91-4d2f-ab31-59fa33cb1813-logging-loki-s3\") pod \"logging-loki-querier-76bf7b6d45-mhn8q\" (UID: \"82108d8c-ce91-4d2f-ab31-59fa33cb1813\") " pod="openshift-logging/logging-loki-querier-76bf7b6d45-mhn8q" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.410514 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n4g2c\" (UniqueName: \"kubernetes.io/projected/33a1ba78-c845-421b-9404-1f56402fc29a-kube-api-access-n4g2c\") pod \"logging-loki-query-frontend-6d6859c548-qlq5c\" (UID: \"33a1ba78-c845-421b-9404-1f56402fc29a\") " pod="openshift-logging/logging-loki-query-frontend-6d6859c548-qlq5c" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.410531 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/49f1d891-d5f2-4c56-af09-542c600ddb15-lokistack-gateway\") pod \"logging-loki-gateway-f46df4b65-hbtdt\" (UID: \"49f1d891-d5f2-4c56-af09-542c600ddb15\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-hbtdt" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.410552 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/49f1d891-d5f2-4c56-af09-542c600ddb15-rbac\") pod \"logging-loki-gateway-f46df4b65-hbtdt\" (UID: \"49f1d891-d5f2-4c56-af09-542c600ddb15\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-hbtdt" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.410573 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/33a1ba78-c845-421b-9404-1f56402fc29a-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-6d6859c548-qlq5c\" (UID: \"33a1ba78-c845-421b-9404-1f56402fc29a\") " pod="openshift-logging/logging-loki-query-frontend-6d6859c548-qlq5c" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.410588 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/33a1ba78-c845-421b-9404-1f56402fc29a-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-6d6859c548-qlq5c\" (UID: \"33a1ba78-c845-421b-9404-1f56402fc29a\") " pod="openshift-logging/logging-loki-query-frontend-6d6859c548-qlq5c" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.410602 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/53934528-3cf8-46d2-856a-1d1deef2bd01-tenants\") pod \"logging-loki-gateway-f46df4b65-plgfh\" (UID: \"53934528-3cf8-46d2-856a-1d1deef2bd01\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-plgfh" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.411838 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82108d8c-ce91-4d2f-ab31-59fa33cb1813-config\") pod \"logging-loki-querier-76bf7b6d45-mhn8q\" (UID: \"82108d8c-ce91-4d2f-ab31-59fa33cb1813\") " 
pod="openshift-logging/logging-loki-querier-76bf7b6d45-mhn8q" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.413643 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/82108d8c-ce91-4d2f-ab31-59fa33cb1813-logging-loki-ca-bundle\") pod \"logging-loki-querier-76bf7b6d45-mhn8q\" (UID: \"82108d8c-ce91-4d2f-ab31-59fa33cb1813\") " pod="openshift-logging/logging-loki-querier-76bf7b6d45-mhn8q" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.414344 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/82108d8c-ce91-4d2f-ab31-59fa33cb1813-logging-loki-querier-http\") pod \"logging-loki-querier-76bf7b6d45-mhn8q\" (UID: \"82108d8c-ce91-4d2f-ab31-59fa33cb1813\") " pod="openshift-logging/logging-loki-querier-76bf7b6d45-mhn8q" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.421461 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/82108d8c-ce91-4d2f-ab31-59fa33cb1813-logging-loki-querier-grpc\") pod \"logging-loki-querier-76bf7b6d45-mhn8q\" (UID: \"82108d8c-ce91-4d2f-ab31-59fa33cb1813\") " pod="openshift-logging/logging-loki-querier-76bf7b6d45-mhn8q" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.421774 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/82108d8c-ce91-4d2f-ab31-59fa33cb1813-logging-loki-s3\") pod \"logging-loki-querier-76bf7b6d45-mhn8q\" (UID: \"82108d8c-ce91-4d2f-ab31-59fa33cb1813\") " pod="openshift-logging/logging-loki-querier-76bf7b6d45-mhn8q" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.429595 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w8fvg\" (UniqueName: \"kubernetes.io/projected/82108d8c-ce91-4d2f-ab31-59fa33cb1813-kube-api-access-w8fvg\") pod \"logging-loki-querier-76bf7b6d45-mhn8q\" (UID: \"82108d8c-ce91-4d2f-ab31-59fa33cb1813\") " pod="openshift-logging/logging-loki-querier-76bf7b6d45-mhn8q" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.470968 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-querier-76bf7b6d45-mhn8q" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.511594 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/33a1ba78-c845-421b-9404-1f56402fc29a-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-6d6859c548-qlq5c\" (UID: \"33a1ba78-c845-421b-9404-1f56402fc29a\") " pod="openshift-logging/logging-loki-query-frontend-6d6859c548-qlq5c" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.513143 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/33a1ba78-c845-421b-9404-1f56402fc29a-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-6d6859c548-qlq5c\" (UID: \"33a1ba78-c845-421b-9404-1f56402fc29a\") " pod="openshift-logging/logging-loki-query-frontend-6d6859c548-qlq5c" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.513188 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/53934528-3cf8-46d2-856a-1d1deef2bd01-tenants\") pod \"logging-loki-gateway-f46df4b65-plgfh\" (UID: \"53934528-3cf8-46d2-856a-1d1deef2bd01\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-plgfh" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.513253 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/53934528-3cf8-46d2-856a-1d1deef2bd01-lokistack-gateway\") pod \"logging-loki-gateway-f46df4b65-plgfh\" (UID: \"53934528-3cf8-46d2-856a-1d1deef2bd01\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-plgfh" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.513294 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/49f1d891-d5f2-4c56-af09-542c600ddb15-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-f46df4b65-hbtdt\" (UID: \"49f1d891-d5f2-4c56-af09-542c600ddb15\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-hbtdt" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.513321 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/53934528-3cf8-46d2-856a-1d1deef2bd01-tls-secret\") pod \"logging-loki-gateway-f46df4b65-plgfh\" (UID: \"53934528-3cf8-46d2-856a-1d1deef2bd01\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-plgfh" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.513344 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/53934528-3cf8-46d2-856a-1d1deef2bd01-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-f46df4b65-plgfh\" (UID: \"53934528-3cf8-46d2-856a-1d1deef2bd01\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-plgfh" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.513630 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49f1d891-d5f2-4c56-af09-542c600ddb15-logging-loki-ca-bundle\") pod \"logging-loki-gateway-f46df4b65-hbtdt\" (UID: \"49f1d891-d5f2-4c56-af09-542c600ddb15\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-hbtdt" Feb 18 00:47:53 crc 
kubenswrapper[4791]: I0218 00:47:53.513696 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dkjw2\" (UniqueName: \"kubernetes.io/projected/49f1d891-d5f2-4c56-af09-542c600ddb15-kube-api-access-dkjw2\") pod \"logging-loki-gateway-f46df4b65-hbtdt\" (UID: \"49f1d891-d5f2-4c56-af09-542c600ddb15\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-hbtdt" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.513718 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33a1ba78-c845-421b-9404-1f56402fc29a-config\") pod \"logging-loki-query-frontend-6d6859c548-qlq5c\" (UID: \"33a1ba78-c845-421b-9404-1f56402fc29a\") " pod="openshift-logging/logging-loki-query-frontend-6d6859c548-qlq5c" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.513733 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/53934528-3cf8-46d2-856a-1d1deef2bd01-rbac\") pod \"logging-loki-gateway-f46df4b65-plgfh\" (UID: \"53934528-3cf8-46d2-856a-1d1deef2bd01\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-plgfh" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.513774 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/53934528-3cf8-46d2-856a-1d1deef2bd01-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-f46df4b65-plgfh\" (UID: \"53934528-3cf8-46d2-856a-1d1deef2bd01\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-plgfh" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.513926 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49f1d891-d5f2-4c56-af09-542c600ddb15-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-f46df4b65-hbtdt\" (UID: \"49f1d891-d5f2-4c56-af09-542c600ddb15\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-hbtdt" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.514223 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/53934528-3cf8-46d2-856a-1d1deef2bd01-logging-loki-ca-bundle\") pod \"logging-loki-gateway-f46df4b65-plgfh\" (UID: \"53934528-3cf8-46d2-856a-1d1deef2bd01\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-plgfh" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.514300 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r8jw2\" (UniqueName: \"kubernetes.io/projected/53934528-3cf8-46d2-856a-1d1deef2bd01-kube-api-access-r8jw2\") pod \"logging-loki-gateway-f46df4b65-plgfh\" (UID: \"53934528-3cf8-46d2-856a-1d1deef2bd01\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-plgfh" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.514355 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/33a1ba78-c845-421b-9404-1f56402fc29a-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-6d6859c548-qlq5c\" (UID: \"33a1ba78-c845-421b-9404-1f56402fc29a\") " pod="openshift-logging/logging-loki-query-frontend-6d6859c548-qlq5c" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.514382 4791 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/49f1d891-d5f2-4c56-af09-542c600ddb15-tenants\") pod \"logging-loki-gateway-f46df4b65-hbtdt\" (UID: \"49f1d891-d5f2-4c56-af09-542c600ddb15\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-hbtdt" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.514422 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/49f1d891-d5f2-4c56-af09-542c600ddb15-tls-secret\") pod \"logging-loki-gateway-f46df4b65-hbtdt\" (UID: \"49f1d891-d5f2-4c56-af09-542c600ddb15\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-hbtdt" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.514548 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n4g2c\" (UniqueName: \"kubernetes.io/projected/33a1ba78-c845-421b-9404-1f56402fc29a-kube-api-access-n4g2c\") pod \"logging-loki-query-frontend-6d6859c548-qlq5c\" (UID: \"33a1ba78-c845-421b-9404-1f56402fc29a\") " pod="openshift-logging/logging-loki-query-frontend-6d6859c548-qlq5c" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.514600 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/49f1d891-d5f2-4c56-af09-542c600ddb15-lokistack-gateway\") pod \"logging-loki-gateway-f46df4b65-hbtdt\" (UID: \"49f1d891-d5f2-4c56-af09-542c600ddb15\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-hbtdt" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.514623 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/49f1d891-d5f2-4c56-af09-542c600ddb15-rbac\") pod \"logging-loki-gateway-f46df4b65-hbtdt\" (UID: \"49f1d891-d5f2-4c56-af09-542c600ddb15\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-hbtdt" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.515183 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/53934528-3cf8-46d2-856a-1d1deef2bd01-lokistack-gateway\") pod \"logging-loki-gateway-f46df4b65-plgfh\" (UID: \"53934528-3cf8-46d2-856a-1d1deef2bd01\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-plgfh" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.515877 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/53934528-3cf8-46d2-856a-1d1deef2bd01-logging-loki-ca-bundle\") pod \"logging-loki-gateway-f46df4b65-plgfh\" (UID: \"53934528-3cf8-46d2-856a-1d1deef2bd01\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-plgfh" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.515890 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/33a1ba78-c845-421b-9404-1f56402fc29a-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-6d6859c548-qlq5c\" (UID: \"33a1ba78-c845-421b-9404-1f56402fc29a\") " pod="openshift-logging/logging-loki-query-frontend-6d6859c548-qlq5c" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.516610 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49f1d891-d5f2-4c56-af09-542c600ddb15-logging-loki-ca-bundle\") pod \"logging-loki-gateway-f46df4b65-hbtdt\" (UID: \"49f1d891-d5f2-4c56-af09-542c600ddb15\") " 
pod="openshift-logging/logging-loki-gateway-f46df4b65-hbtdt" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.516921 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/53934528-3cf8-46d2-856a-1d1deef2bd01-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-f46df4b65-plgfh\" (UID: \"53934528-3cf8-46d2-856a-1d1deef2bd01\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-plgfh" Feb 18 00:47:53 crc kubenswrapper[4791]: E0218 00:47:53.516991 4791 secret.go:188] Couldn't get secret openshift-logging/logging-loki-gateway-http: secret "logging-loki-gateway-http" not found Feb 18 00:47:53 crc kubenswrapper[4791]: E0218 00:47:53.517031 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/53934528-3cf8-46d2-856a-1d1deef2bd01-tls-secret podName:53934528-3cf8-46d2-856a-1d1deef2bd01 nodeName:}" failed. No retries permitted until 2026-02-18 00:47:54.017017896 +0000 UTC m=+815.585031066 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-secret" (UniqueName: "kubernetes.io/secret/53934528-3cf8-46d2-856a-1d1deef2bd01-tls-secret") pod "logging-loki-gateway-f46df4b65-plgfh" (UID: "53934528-3cf8-46d2-856a-1d1deef2bd01") : secret "logging-loki-gateway-http" not found Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.517999 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/53934528-3cf8-46d2-856a-1d1deef2bd01-tenants\") pod \"logging-loki-gateway-f46df4b65-plgfh\" (UID: \"53934528-3cf8-46d2-856a-1d1deef2bd01\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-plgfh" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.518647 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/53934528-3cf8-46d2-856a-1d1deef2bd01-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-f46df4b65-plgfh\" (UID: \"53934528-3cf8-46d2-856a-1d1deef2bd01\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-plgfh" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.519287 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/53934528-3cf8-46d2-856a-1d1deef2bd01-rbac\") pod \"logging-loki-gateway-f46df4b65-plgfh\" (UID: \"53934528-3cf8-46d2-856a-1d1deef2bd01\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-plgfh" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.519337 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/49f1d891-d5f2-4c56-af09-542c600ddb15-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-f46df4b65-hbtdt\" (UID: \"49f1d891-d5f2-4c56-af09-542c600ddb15\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-hbtdt" Feb 18 00:47:53 crc kubenswrapper[4791]: E0218 00:47:53.520122 4791 secret.go:188] Couldn't get secret openshift-logging/logging-loki-gateway-http: secret "logging-loki-gateway-http" not found Feb 18 00:47:53 crc kubenswrapper[4791]: E0218 00:47:53.520176 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/49f1d891-d5f2-4c56-af09-542c600ddb15-tls-secret podName:49f1d891-d5f2-4c56-af09-542c600ddb15 nodeName:}" failed. 
No retries permitted until 2026-02-18 00:47:54.020148733 +0000 UTC m=+815.588161903 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-secret" (UniqueName: "kubernetes.io/secret/49f1d891-d5f2-4c56-af09-542c600ddb15-tls-secret") pod "logging-loki-gateway-f46df4b65-hbtdt" (UID: "49f1d891-d5f2-4c56-af09-542c600ddb15") : secret "logging-loki-gateway-http" not found Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.521023 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33a1ba78-c845-421b-9404-1f56402fc29a-config\") pod \"logging-loki-query-frontend-6d6859c548-qlq5c\" (UID: \"33a1ba78-c845-421b-9404-1f56402fc29a\") " pod="openshift-logging/logging-loki-query-frontend-6d6859c548-qlq5c" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.521703 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/49f1d891-d5f2-4c56-af09-542c600ddb15-lokistack-gateway\") pod \"logging-loki-gateway-f46df4b65-hbtdt\" (UID: \"49f1d891-d5f2-4c56-af09-542c600ddb15\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-hbtdt" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.525233 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/33a1ba78-c845-421b-9404-1f56402fc29a-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-6d6859c548-qlq5c\" (UID: \"33a1ba78-c845-421b-9404-1f56402fc29a\") " pod="openshift-logging/logging-loki-query-frontend-6d6859c548-qlq5c" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.525574 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/49f1d891-d5f2-4c56-af09-542c600ddb15-rbac\") pod \"logging-loki-gateway-f46df4b65-hbtdt\" (UID: \"49f1d891-d5f2-4c56-af09-542c600ddb15\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-hbtdt" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.526065 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/49f1d891-d5f2-4c56-af09-542c600ddb15-tenants\") pod \"logging-loki-gateway-f46df4b65-hbtdt\" (UID: \"49f1d891-d5f2-4c56-af09-542c600ddb15\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-hbtdt" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.526303 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49f1d891-d5f2-4c56-af09-542c600ddb15-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-f46df4b65-hbtdt\" (UID: \"49f1d891-d5f2-4c56-af09-542c600ddb15\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-hbtdt" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.527975 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/33a1ba78-c845-421b-9404-1f56402fc29a-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-6d6859c548-qlq5c\" (UID: \"33a1ba78-c845-421b-9404-1f56402fc29a\") " pod="openshift-logging/logging-loki-query-frontend-6d6859c548-qlq5c" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.537129 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r8jw2\" (UniqueName: 
\"kubernetes.io/projected/53934528-3cf8-46d2-856a-1d1deef2bd01-kube-api-access-r8jw2\") pod \"logging-loki-gateway-f46df4b65-plgfh\" (UID: \"53934528-3cf8-46d2-856a-1d1deef2bd01\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-plgfh" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.538387 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n4g2c\" (UniqueName: \"kubernetes.io/projected/33a1ba78-c845-421b-9404-1f56402fc29a-kube-api-access-n4g2c\") pod \"logging-loki-query-frontend-6d6859c548-qlq5c\" (UID: \"33a1ba78-c845-421b-9404-1f56402fc29a\") " pod="openshift-logging/logging-loki-query-frontend-6d6859c548-qlq5c" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.539861 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dkjw2\" (UniqueName: \"kubernetes.io/projected/49f1d891-d5f2-4c56-af09-542c600ddb15-kube-api-access-dkjw2\") pod \"logging-loki-gateway-f46df4b65-hbtdt\" (UID: \"49f1d891-d5f2-4c56-af09-542c600ddb15\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-hbtdt" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.548970 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-query-frontend-6d6859c548-qlq5c" Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.781468 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-query-frontend-6d6859c548-qlq5c"] Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.814722 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-distributor-5d5548c9f5-t9t9m"] Feb 18 00:47:53 crc kubenswrapper[4791]: I0218 00:47:53.978337 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-querier-76bf7b6d45-mhn8q"] Feb 18 00:47:53 crc kubenswrapper[4791]: W0218 00:47:53.981804 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod82108d8c_ce91_4d2f_ab31_59fa33cb1813.slice/crio-41bef91e571d31f6b7a4a2d6b730293445810a9ad7565a8508b854f0c66c2823 WatchSource:0}: Error finding container 41bef91e571d31f6b7a4a2d6b730293445810a9ad7565a8508b854f0c66c2823: Status 404 returned error can't find the container with id 41bef91e571d31f6b7a4a2d6b730293445810a9ad7565a8508b854f0c66c2823 Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.022632 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/49f1d891-d5f2-4c56-af09-542c600ddb15-tls-secret\") pod \"logging-loki-gateway-f46df4b65-hbtdt\" (UID: \"49f1d891-d5f2-4c56-af09-542c600ddb15\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-hbtdt" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.022713 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/53934528-3cf8-46d2-856a-1d1deef2bd01-tls-secret\") pod \"logging-loki-gateway-f46df4b65-plgfh\" (UID: \"53934528-3cf8-46d2-856a-1d1deef2bd01\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-plgfh" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.028359 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/53934528-3cf8-46d2-856a-1d1deef2bd01-tls-secret\") pod \"logging-loki-gateway-f46df4b65-plgfh\" (UID: \"53934528-3cf8-46d2-856a-1d1deef2bd01\") " 
pod="openshift-logging/logging-loki-gateway-f46df4b65-plgfh" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.028523 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/49f1d891-d5f2-4c56-af09-542c600ddb15-tls-secret\") pod \"logging-loki-gateway-f46df4b65-hbtdt\" (UID: \"49f1d891-d5f2-4c56-af09-542c600ddb15\") " pod="openshift-logging/logging-loki-gateway-f46df4b65-hbtdt" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.050488 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-f46df4b65-plgfh" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.148437 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-ingester-0"] Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.149263 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-ingester-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.151289 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-ingester-grpc" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.151606 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-ingester-http" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.161974 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-ingester-0"] Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.203228 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-compactor-0"] Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.204306 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-compactor-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.207029 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-compactor-grpc" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.208497 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-compactor-http" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.209206 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-compactor-0"] Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.229053 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/4c3e6c67-da97-4c01-bca7-ed995bc20255-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"4c3e6c67-da97-4c01-bca7-ed995bc20255\") " pod="openshift-logging/logging-loki-compactor-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.229129 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c3e6c67-da97-4c01-bca7-ed995bc20255-config\") pod \"logging-loki-compactor-0\" (UID: \"4c3e6c67-da97-4c01-bca7-ed995bc20255\") " pod="openshift-logging/logging-loki-compactor-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.229174 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4c3e6c67-da97-4c01-bca7-ed995bc20255-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"4c3e6c67-da97-4c01-bca7-ed995bc20255\") " pod="openshift-logging/logging-loki-compactor-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.229316 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/4c3e6c67-da97-4c01-bca7-ed995bc20255-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"4c3e6c67-da97-4c01-bca7-ed995bc20255\") " pod="openshift-logging/logging-loki-compactor-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.229411 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/4c3e6c67-da97-4c01-bca7-ed995bc20255-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"4c3e6c67-da97-4c01-bca7-ed995bc20255\") " pod="openshift-logging/logging-loki-compactor-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.229476 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2rcvn\" (UniqueName: \"kubernetes.io/projected/4c3e6c67-da97-4c01-bca7-ed995bc20255-kube-api-access-2rcvn\") pod \"logging-loki-compactor-0\" (UID: \"4c3e6c67-da97-4c01-bca7-ed995bc20255\") " pod="openshift-logging/logging-loki-compactor-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.229562 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-1a004a1f-41f7-48c2-a019-01148eaada91\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1a004a1f-41f7-48c2-a019-01148eaada91\") pod \"logging-loki-compactor-0\" (UID: \"4c3e6c67-da97-4c01-bca7-ed995bc20255\") " 
pod="openshift-logging/logging-loki-compactor-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.281559 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"] Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.283000 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-index-gateway-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.286029 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-index-gateway-grpc" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.286185 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-index-gateway-http" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.289201 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"] Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.313823 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-f46df4b65-hbtdt" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.331306 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/4c3e6c67-da97-4c01-bca7-ed995bc20255-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"4c3e6c67-da97-4c01-bca7-ed995bc20255\") " pod="openshift-logging/logging-loki-compactor-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.331379 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/853d966d-3d6c-4fbf-9c83-62148305ee9e-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"853d966d-3d6c-4fbf-9c83-62148305ee9e\") " pod="openshift-logging/logging-loki-ingester-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.331416 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2rcvn\" (UniqueName: \"kubernetes.io/projected/4c3e6c67-da97-4c01-bca7-ed995bc20255-kube-api-access-2rcvn\") pod \"logging-loki-compactor-0\" (UID: \"4c3e6c67-da97-4c01-bca7-ed995bc20255\") " pod="openshift-logging/logging-loki-compactor-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.331461 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/853d966d-3d6c-4fbf-9c83-62148305ee9e-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"853d966d-3d6c-4fbf-9c83-62148305ee9e\") " pod="openshift-logging/logging-loki-ingester-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.331496 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/853d966d-3d6c-4fbf-9c83-62148305ee9e-config\") pod \"logging-loki-ingester-0\" (UID: \"853d966d-3d6c-4fbf-9c83-62148305ee9e\") " pod="openshift-logging/logging-loki-ingester-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.331525 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-1a004a1f-41f7-48c2-a019-01148eaada91\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1a004a1f-41f7-48c2-a019-01148eaada91\") pod \"logging-loki-compactor-0\" (UID: 
\"4c3e6c67-da97-4c01-bca7-ed995bc20255\") " pod="openshift-logging/logging-loki-compactor-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.331576 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xgvmr\" (UniqueName: \"kubernetes.io/projected/853d966d-3d6c-4fbf-9c83-62148305ee9e-kube-api-access-xgvmr\") pod \"logging-loki-ingester-0\" (UID: \"853d966d-3d6c-4fbf-9c83-62148305ee9e\") " pod="openshift-logging/logging-loki-ingester-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.331605 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/4c3e6c67-da97-4c01-bca7-ed995bc20255-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"4c3e6c67-da97-4c01-bca7-ed995bc20255\") " pod="openshift-logging/logging-loki-compactor-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.331631 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c3e6c67-da97-4c01-bca7-ed995bc20255-config\") pod \"logging-loki-compactor-0\" (UID: \"4c3e6c67-da97-4c01-bca7-ed995bc20255\") " pod="openshift-logging/logging-loki-compactor-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.331662 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4c3e6c67-da97-4c01-bca7-ed995bc20255-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"4c3e6c67-da97-4c01-bca7-ed995bc20255\") " pod="openshift-logging/logging-loki-compactor-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.331698 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-52ec0e2f-edf7-4883-9e3e-009f43ec19b3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-52ec0e2f-edf7-4883-9e3e-009f43ec19b3\") pod \"logging-loki-ingester-0\" (UID: \"853d966d-3d6c-4fbf-9c83-62148305ee9e\") " pod="openshift-logging/logging-loki-ingester-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.331730 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/853d966d-3d6c-4fbf-9c83-62148305ee9e-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"853d966d-3d6c-4fbf-9c83-62148305ee9e\") " pod="openshift-logging/logging-loki-ingester-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.331758 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-7b57720c-59f5-4806-953d-102570face10\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7b57720c-59f5-4806-953d-102570face10\") pod \"logging-loki-ingester-0\" (UID: \"853d966d-3d6c-4fbf-9c83-62148305ee9e\") " pod="openshift-logging/logging-loki-ingester-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.331792 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/4c3e6c67-da97-4c01-bca7-ed995bc20255-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"4c3e6c67-da97-4c01-bca7-ed995bc20255\") " pod="openshift-logging/logging-loki-compactor-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.331818 4791 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/853d966d-3d6c-4fbf-9c83-62148305ee9e-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"853d966d-3d6c-4fbf-9c83-62148305ee9e\") " pod="openshift-logging/logging-loki-ingester-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.332947 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c3e6c67-da97-4c01-bca7-ed995bc20255-config\") pod \"logging-loki-compactor-0\" (UID: \"4c3e6c67-da97-4c01-bca7-ed995bc20255\") " pod="openshift-logging/logging-loki-compactor-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.333608 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4c3e6c67-da97-4c01-bca7-ed995bc20255-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"4c3e6c67-da97-4c01-bca7-ed995bc20255\") " pod="openshift-logging/logging-loki-compactor-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.335274 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/4c3e6c67-da97-4c01-bca7-ed995bc20255-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"4c3e6c67-da97-4c01-bca7-ed995bc20255\") " pod="openshift-logging/logging-loki-compactor-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.336447 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/4c3e6c67-da97-4c01-bca7-ed995bc20255-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"4c3e6c67-da97-4c01-bca7-ed995bc20255\") " pod="openshift-logging/logging-loki-compactor-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.337405 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/4c3e6c67-da97-4c01-bca7-ed995bc20255-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"4c3e6c67-da97-4c01-bca7-ed995bc20255\") " pod="openshift-logging/logging-loki-compactor-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.349001 4791 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.349040 4791 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-1a004a1f-41f7-48c2-a019-01148eaada91\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1a004a1f-41f7-48c2-a019-01148eaada91\") pod \"logging-loki-compactor-0\" (UID: \"4c3e6c67-da97-4c01-bca7-ed995bc20255\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/54bce62c8ccdf541dc8ed031a90a26c3f3f781eadda39705f9a2bf75edab6223/globalmount\"" pod="openshift-logging/logging-loki-compactor-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.352117 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2rcvn\" (UniqueName: \"kubernetes.io/projected/4c3e6c67-da97-4c01-bca7-ed995bc20255-kube-api-access-2rcvn\") pod \"logging-loki-compactor-0\" (UID: \"4c3e6c67-da97-4c01-bca7-ed995bc20255\") " pod="openshift-logging/logging-loki-compactor-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.376076 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-querier-76bf7b6d45-mhn8q" event={"ID":"82108d8c-ce91-4d2f-ab31-59fa33cb1813","Type":"ContainerStarted","Data":"41bef91e571d31f6b7a4a2d6b730293445810a9ad7565a8508b854f0c66c2823"} Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.376856 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-distributor-5d5548c9f5-t9t9m" event={"ID":"181d182c-32fc-497d-b84b-a127338caae4","Type":"ContainerStarted","Data":"57af7c754229500ad4d8b0df4ef78a792bf50c70eb4302e30c10456c11074753"} Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.377603 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-query-frontend-6d6859c548-qlq5c" event={"ID":"33a1ba78-c845-421b-9404-1f56402fc29a","Type":"ContainerStarted","Data":"117758d29f957ab183c94ae451a6ea1a4b16a9bef882759f251abb3bb28bd61c"} Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.387251 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-1a004a1f-41f7-48c2-a019-01148eaada91\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1a004a1f-41f7-48c2-a019-01148eaada91\") pod \"logging-loki-compactor-0\" (UID: \"4c3e6c67-da97-4c01-bca7-ed995bc20255\") " pod="openshift-logging/logging-loki-compactor-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.433819 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xgvmr\" (UniqueName: \"kubernetes.io/projected/853d966d-3d6c-4fbf-9c83-62148305ee9e-kube-api-access-xgvmr\") pod \"logging-loki-ingester-0\" (UID: \"853d966d-3d6c-4fbf-9c83-62148305ee9e\") " pod="openshift-logging/logging-loki-ingester-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.434336 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/853d966d-3d6c-4fbf-9c83-62148305ee9e-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"853d966d-3d6c-4fbf-9c83-62148305ee9e\") " pod="openshift-logging/logging-loki-ingester-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.434382 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-f9239889-0cfa-471d-80d6-bf5c5784f035\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f9239889-0cfa-471d-80d6-bf5c5784f035\") 
pod \"logging-loki-index-gateway-0\" (UID: \"5e9ff18e-de75-40c3-9a73-6cad6b406d1c\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.434422 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6drlm\" (UniqueName: \"kubernetes.io/projected/5e9ff18e-de75-40c3-9a73-6cad6b406d1c-kube-api-access-6drlm\") pod \"logging-loki-index-gateway-0\" (UID: \"5e9ff18e-de75-40c3-9a73-6cad6b406d1c\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.434456 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/853d966d-3d6c-4fbf-9c83-62148305ee9e-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"853d966d-3d6c-4fbf-9c83-62148305ee9e\") " pod="openshift-logging/logging-loki-ingester-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.434532 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/853d966d-3d6c-4fbf-9c83-62148305ee9e-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"853d966d-3d6c-4fbf-9c83-62148305ee9e\") " pod="openshift-logging/logging-loki-ingester-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.434586 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/853d966d-3d6c-4fbf-9c83-62148305ee9e-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"853d966d-3d6c-4fbf-9c83-62148305ee9e\") " pod="openshift-logging/logging-loki-ingester-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.434669 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/5e9ff18e-de75-40c3-9a73-6cad6b406d1c-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"5e9ff18e-de75-40c3-9a73-6cad6b406d1c\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.434703 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-52ec0e2f-edf7-4883-9e3e-009f43ec19b3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-52ec0e2f-edf7-4883-9e3e-009f43ec19b3\") pod \"logging-loki-ingester-0\" (UID: \"853d966d-3d6c-4fbf-9c83-62148305ee9e\") " pod="openshift-logging/logging-loki-ingester-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.434731 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5e9ff18e-de75-40c3-9a73-6cad6b406d1c-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"5e9ff18e-de75-40c3-9a73-6cad6b406d1c\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.434762 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-7b57720c-59f5-4806-953d-102570face10\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7b57720c-59f5-4806-953d-102570face10\") pod \"logging-loki-ingester-0\" (UID: \"853d966d-3d6c-4fbf-9c83-62148305ee9e\") " pod="openshift-logging/logging-loki-ingester-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.434829 4791 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/5e9ff18e-de75-40c3-9a73-6cad6b406d1c-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"5e9ff18e-de75-40c3-9a73-6cad6b406d1c\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.434860 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/853d966d-3d6c-4fbf-9c83-62148305ee9e-config\") pod \"logging-loki-ingester-0\" (UID: \"853d966d-3d6c-4fbf-9c83-62148305ee9e\") " pod="openshift-logging/logging-loki-ingester-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.434886 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/5e9ff18e-de75-40c3-9a73-6cad6b406d1c-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"5e9ff18e-de75-40c3-9a73-6cad6b406d1c\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.434962 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e9ff18e-de75-40c3-9a73-6cad6b406d1c-config\") pod \"logging-loki-index-gateway-0\" (UID: \"5e9ff18e-de75-40c3-9a73-6cad6b406d1c\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.435364 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/853d966d-3d6c-4fbf-9c83-62148305ee9e-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"853d966d-3d6c-4fbf-9c83-62148305ee9e\") " pod="openshift-logging/logging-loki-ingester-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.435952 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/853d966d-3d6c-4fbf-9c83-62148305ee9e-config\") pod \"logging-loki-ingester-0\" (UID: \"853d966d-3d6c-4fbf-9c83-62148305ee9e\") " pod="openshift-logging/logging-loki-ingester-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.437408 4791 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.437437 4791 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-7b57720c-59f5-4806-953d-102570face10\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7b57720c-59f5-4806-953d-102570face10\") pod \"logging-loki-ingester-0\" (UID: \"853d966d-3d6c-4fbf-9c83-62148305ee9e\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/6da5e4a5f981e653ad6b37aafa8864762cb5b326569908d1ba004c1d34164795/globalmount\"" pod="openshift-logging/logging-loki-ingester-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.437921 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/853d966d-3d6c-4fbf-9c83-62148305ee9e-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"853d966d-3d6c-4fbf-9c83-62148305ee9e\") " pod="openshift-logging/logging-loki-ingester-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.444819 4791 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.444847 4791 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-52ec0e2f-edf7-4883-9e3e-009f43ec19b3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-52ec0e2f-edf7-4883-9e3e-009f43ec19b3\") pod \"logging-loki-ingester-0\" (UID: \"853d966d-3d6c-4fbf-9c83-62148305ee9e\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/14ffdf3b43a543770e4af261b74611f85e062965b3957dfdbdb9a8393970ed04/globalmount\"" pod="openshift-logging/logging-loki-ingester-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.447658 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/853d966d-3d6c-4fbf-9c83-62148305ee9e-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"853d966d-3d6c-4fbf-9c83-62148305ee9e\") " pod="openshift-logging/logging-loki-ingester-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.456137 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/853d966d-3d6c-4fbf-9c83-62148305ee9e-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"853d966d-3d6c-4fbf-9c83-62148305ee9e\") " pod="openshift-logging/logging-loki-ingester-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.472578 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xgvmr\" (UniqueName: \"kubernetes.io/projected/853d966d-3d6c-4fbf-9c83-62148305ee9e-kube-api-access-xgvmr\") pod \"logging-loki-ingester-0\" (UID: \"853d966d-3d6c-4fbf-9c83-62148305ee9e\") " pod="openshift-logging/logging-loki-ingester-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.482272 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-52ec0e2f-edf7-4883-9e3e-009f43ec19b3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-52ec0e2f-edf7-4883-9e3e-009f43ec19b3\") pod \"logging-loki-ingester-0\" (UID: \"853d966d-3d6c-4fbf-9c83-62148305ee9e\") " pod="openshift-logging/logging-loki-ingester-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.482408 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-logging/logging-loki-gateway-f46df4b65-plgfh"] Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.506429 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-7b57720c-59f5-4806-953d-102570face10\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7b57720c-59f5-4806-953d-102570face10\") pod \"logging-loki-ingester-0\" (UID: \"853d966d-3d6c-4fbf-9c83-62148305ee9e\") " pod="openshift-logging/logging-loki-ingester-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.530302 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-compactor-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.537370 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/5e9ff18e-de75-40c3-9a73-6cad6b406d1c-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"5e9ff18e-de75-40c3-9a73-6cad6b406d1c\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.537428 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5e9ff18e-de75-40c3-9a73-6cad6b406d1c-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"5e9ff18e-de75-40c3-9a73-6cad6b406d1c\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.537475 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/5e9ff18e-de75-40c3-9a73-6cad6b406d1c-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"5e9ff18e-de75-40c3-9a73-6cad6b406d1c\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.537502 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/5e9ff18e-de75-40c3-9a73-6cad6b406d1c-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"5e9ff18e-de75-40c3-9a73-6cad6b406d1c\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.537534 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e9ff18e-de75-40c3-9a73-6cad6b406d1c-config\") pod \"logging-loki-index-gateway-0\" (UID: \"5e9ff18e-de75-40c3-9a73-6cad6b406d1c\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.537580 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-f9239889-0cfa-471d-80d6-bf5c5784f035\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f9239889-0cfa-471d-80d6-bf5c5784f035\") pod \"logging-loki-index-gateway-0\" (UID: \"5e9ff18e-de75-40c3-9a73-6cad6b406d1c\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.537598 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6drlm\" (UniqueName: \"kubernetes.io/projected/5e9ff18e-de75-40c3-9a73-6cad6b406d1c-kube-api-access-6drlm\") pod \"logging-loki-index-gateway-0\" (UID: \"5e9ff18e-de75-40c3-9a73-6cad6b406d1c\") " 
pod="openshift-logging/logging-loki-index-gateway-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.538839 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5e9ff18e-de75-40c3-9a73-6cad6b406d1c-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"5e9ff18e-de75-40c3-9a73-6cad6b406d1c\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.539553 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e9ff18e-de75-40c3-9a73-6cad6b406d1c-config\") pod \"logging-loki-index-gateway-0\" (UID: \"5e9ff18e-de75-40c3-9a73-6cad6b406d1c\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.541283 4791 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.541323 4791 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-f9239889-0cfa-471d-80d6-bf5c5784f035\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f9239889-0cfa-471d-80d6-bf5c5784f035\") pod \"logging-loki-index-gateway-0\" (UID: \"5e9ff18e-de75-40c3-9a73-6cad6b406d1c\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/31b5c9d5b0fefb08b0609ea5955c0d8628b6db8c42f85cc747f06b3e9b4ebf3b/globalmount\"" pod="openshift-logging/logging-loki-index-gateway-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.541809 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/5e9ff18e-de75-40c3-9a73-6cad6b406d1c-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"5e9ff18e-de75-40c3-9a73-6cad6b406d1c\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.542573 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/5e9ff18e-de75-40c3-9a73-6cad6b406d1c-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"5e9ff18e-de75-40c3-9a73-6cad6b406d1c\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.543724 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/5e9ff18e-de75-40c3-9a73-6cad6b406d1c-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"5e9ff18e-de75-40c3-9a73-6cad6b406d1c\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.555624 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6drlm\" (UniqueName: \"kubernetes.io/projected/5e9ff18e-de75-40c3-9a73-6cad6b406d1c-kube-api-access-6drlm\") pod \"logging-loki-index-gateway-0\" (UID: \"5e9ff18e-de75-40c3-9a73-6cad6b406d1c\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.573869 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-f9239889-0cfa-471d-80d6-bf5c5784f035\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f9239889-0cfa-471d-80d6-bf5c5784f035\") pod \"logging-loki-index-gateway-0\" (UID: \"5e9ff18e-de75-40c3-9a73-6cad6b406d1c\") " pod="openshift-logging/logging-loki-index-gateway-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.601801 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-index-gateway-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.745030 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-f46df4b65-hbtdt"] Feb 18 00:47:54 crc kubenswrapper[4791]: W0218 00:47:54.764591 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod49f1d891_d5f2_4c56_af09_542c600ddb15.slice/crio-a6b4023a02273bcabcb6b7c10d40dab34628f5a5c6177e943f02a67900ebdf5f WatchSource:0}: Error finding container a6b4023a02273bcabcb6b7c10d40dab34628f5a5c6177e943f02a67900ebdf5f: Status 404 returned error can't find the container with id a6b4023a02273bcabcb6b7c10d40dab34628f5a5c6177e943f02a67900ebdf5f Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.766425 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-ingester-0" Feb 18 00:47:54 crc kubenswrapper[4791]: I0218 00:47:54.940324 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-compactor-0"] Feb 18 00:47:54 crc kubenswrapper[4791]: W0218 00:47:54.949271 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4c3e6c67_da97_4c01_bca7_ed995bc20255.slice/crio-0661bf38472cfc59d3b8c95078a1ea93ecc6a01c40fa862329659b8a0edadb8f WatchSource:0}: Error finding container 0661bf38472cfc59d3b8c95078a1ea93ecc6a01c40fa862329659b8a0edadb8f: Status 404 returned error can't find the container with id 0661bf38472cfc59d3b8c95078a1ea93ecc6a01c40fa862329659b8a0edadb8f Feb 18 00:47:55 crc kubenswrapper[4791]: I0218 00:47:55.024856 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-ingester-0"] Feb 18 00:47:55 crc kubenswrapper[4791]: W0218 00:47:55.029947 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod853d966d_3d6c_4fbf_9c83_62148305ee9e.slice/crio-b96445e14061b166352e3b867c5181874718acf00665106f5d4bdc4c9fc8137d WatchSource:0}: Error finding container b96445e14061b166352e3b867c5181874718acf00665106f5d4bdc4c9fc8137d: Status 404 returned error can't find the container with id b96445e14061b166352e3b867c5181874718acf00665106f5d4bdc4c9fc8137d Feb 18 00:47:55 crc kubenswrapper[4791]: I0218 00:47:55.072506 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"] Feb 18 00:47:55 crc kubenswrapper[4791]: W0218 00:47:55.081952 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5e9ff18e_de75_40c3_9a73_6cad6b406d1c.slice/crio-9d0c0d9862d8904e5167c909fcc466995a38b1f722a37135e136d0f48d0ff8a9 WatchSource:0}: Error finding container 9d0c0d9862d8904e5167c909fcc466995a38b1f722a37135e136d0f48d0ff8a9: Status 404 returned error can't find the container with id 9d0c0d9862d8904e5167c909fcc466995a38b1f722a37135e136d0f48d0ff8a9 Feb 18 00:47:55 crc kubenswrapper[4791]: I0218 00:47:55.387553 4791 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-index-gateway-0" event={"ID":"5e9ff18e-de75-40c3-9a73-6cad6b406d1c","Type":"ContainerStarted","Data":"9d0c0d9862d8904e5167c909fcc466995a38b1f722a37135e136d0f48d0ff8a9"} Feb 18 00:47:55 crc kubenswrapper[4791]: I0218 00:47:55.388826 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-ingester-0" event={"ID":"853d966d-3d6c-4fbf-9c83-62148305ee9e","Type":"ContainerStarted","Data":"b96445e14061b166352e3b867c5181874718acf00665106f5d4bdc4c9fc8137d"} Feb 18 00:47:55 crc kubenswrapper[4791]: I0218 00:47:55.391545 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-f46df4b65-plgfh" event={"ID":"53934528-3cf8-46d2-856a-1d1deef2bd01","Type":"ContainerStarted","Data":"fda52b518f4691c8a1d6318db0c3cf2d3d5755afba72378a2c77f5e12d9526aa"} Feb 18 00:47:55 crc kubenswrapper[4791]: I0218 00:47:55.392951 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-f46df4b65-hbtdt" event={"ID":"49f1d891-d5f2-4c56-af09-542c600ddb15","Type":"ContainerStarted","Data":"a6b4023a02273bcabcb6b7c10d40dab34628f5a5c6177e943f02a67900ebdf5f"} Feb 18 00:47:55 crc kubenswrapper[4791]: I0218 00:47:55.393992 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-compactor-0" event={"ID":"4c3e6c67-da97-4c01-bca7-ed995bc20255","Type":"ContainerStarted","Data":"0661bf38472cfc59d3b8c95078a1ea93ecc6a01c40fa862329659b8a0edadb8f"} Feb 18 00:47:56 crc kubenswrapper[4791]: I0218 00:47:56.800330 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 00:47:56 crc kubenswrapper[4791]: I0218 00:47:56.800641 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 00:47:59 crc kubenswrapper[4791]: I0218 00:47:59.420590 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-ingester-0" event={"ID":"853d966d-3d6c-4fbf-9c83-62148305ee9e","Type":"ContainerStarted","Data":"368713d6d66eb421c0308ad8162decb62ddb9c824119241474d3d81fa3453e00"} Feb 18 00:47:59 crc kubenswrapper[4791]: I0218 00:47:59.421141 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-ingester-0" Feb 18 00:47:59 crc kubenswrapper[4791]: I0218 00:47:59.423520 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-f46df4b65-plgfh" event={"ID":"53934528-3cf8-46d2-856a-1d1deef2bd01","Type":"ContainerStarted","Data":"f8f7eb948be73938d28916a14029a281004202d59a1b7fbab1037cc591de79b2"} Feb 18 00:47:59 crc kubenswrapper[4791]: I0218 00:47:59.425617 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-querier-76bf7b6d45-mhn8q" event={"ID":"82108d8c-ce91-4d2f-ab31-59fa33cb1813","Type":"ContainerStarted","Data":"bdc76ad657945a689a8c476de5950c334823855464dd78740b84494666229974"} Feb 18 00:47:59 crc kubenswrapper[4791]: I0218 00:47:59.425748 4791 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-logging/logging-loki-querier-76bf7b6d45-mhn8q" Feb 18 00:47:59 crc kubenswrapper[4791]: I0218 00:47:59.426748 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-f46df4b65-hbtdt" event={"ID":"49f1d891-d5f2-4c56-af09-542c600ddb15","Type":"ContainerStarted","Data":"540919848d9a38e8f770165bf809661685940f5d825a633dddcfbc9f4e007950"} Feb 18 00:47:59 crc kubenswrapper[4791]: I0218 00:47:59.428132 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-distributor-5d5548c9f5-t9t9m" event={"ID":"181d182c-32fc-497d-b84b-a127338caae4","Type":"ContainerStarted","Data":"9059786d4579365e8957a3f9ee7ed3095b68ef4f84255cfadbb602dbeb3bffb5"} Feb 18 00:47:59 crc kubenswrapper[4791]: I0218 00:47:59.428260 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-distributor-5d5548c9f5-t9t9m" Feb 18 00:47:59 crc kubenswrapper[4791]: I0218 00:47:59.429894 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-compactor-0" event={"ID":"4c3e6c67-da97-4c01-bca7-ed995bc20255","Type":"ContainerStarted","Data":"125df8e978fbe412db261cc615bb2b34678af4b51b59ef30637473745a7486ad"} Feb 18 00:47:59 crc kubenswrapper[4791]: I0218 00:47:59.430392 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-compactor-0" Feb 18 00:47:59 crc kubenswrapper[4791]: I0218 00:47:59.431858 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-query-frontend-6d6859c548-qlq5c" event={"ID":"33a1ba78-c845-421b-9404-1f56402fc29a","Type":"ContainerStarted","Data":"8b2e69e847e2528b6a223b7cd559ab3247d660730f84afa12e656015a7431f73"} Feb 18 00:47:59 crc kubenswrapper[4791]: I0218 00:47:59.432275 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-query-frontend-6d6859c548-qlq5c" Feb 18 00:47:59 crc kubenswrapper[4791]: I0218 00:47:59.438172 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-index-gateway-0" event={"ID":"5e9ff18e-de75-40c3-9a73-6cad6b406d1c","Type":"ContainerStarted","Data":"80d8a8cd430ff06f468a1c9a933594ad4410ae0365d4c87f4d1c9f2abf9de702"} Feb 18 00:47:59 crc kubenswrapper[4791]: I0218 00:47:59.439194 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-index-gateway-0" Feb 18 00:47:59 crc kubenswrapper[4791]: I0218 00:47:59.447535 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-ingester-0" podStartSLOduration=2.963674802 podStartE2EDuration="6.447511275s" podCreationTimestamp="2026-02-18 00:47:53 +0000 UTC" firstStartedPulling="2026-02-18 00:47:55.032755395 +0000 UTC m=+816.600768565" lastFinishedPulling="2026-02-18 00:47:58.516591868 +0000 UTC m=+820.084605038" observedRunningTime="2026-02-18 00:47:59.440729176 +0000 UTC m=+821.008742346" watchObservedRunningTime="2026-02-18 00:47:59.447511275 +0000 UTC m=+821.015524485" Feb 18 00:47:59 crc kubenswrapper[4791]: I0218 00:47:59.464114 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-querier-76bf7b6d45-mhn8q" podStartSLOduration=1.941153745 podStartE2EDuration="6.464094735s" podCreationTimestamp="2026-02-18 00:47:53 +0000 UTC" firstStartedPulling="2026-02-18 00:47:53.983684381 +0000 UTC m=+815.551697551" 
lastFinishedPulling="2026-02-18 00:47:58.506625341 +0000 UTC m=+820.074638541" observedRunningTime="2026-02-18 00:47:59.455579084 +0000 UTC m=+821.023592294" watchObservedRunningTime="2026-02-18 00:47:59.464094735 +0000 UTC m=+821.032107925"
Feb 18 00:47:59 crc kubenswrapper[4791]: I0218 00:47:59.482905 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-compactor-0" podStartSLOduration=2.886211677 podStartE2EDuration="6.482868284s" podCreationTimestamp="2026-02-18 00:47:53 +0000 UTC" firstStartedPulling="2026-02-18 00:47:54.951828434 +0000 UTC m=+816.519841604" lastFinishedPulling="2026-02-18 00:47:58.548485031 +0000 UTC m=+820.116498211" observedRunningTime="2026-02-18 00:47:59.473178275 +0000 UTC m=+821.041191455" watchObservedRunningTime="2026-02-18 00:47:59.482868284 +0000 UTC m=+821.050881464"
Feb 18 00:47:59 crc kubenswrapper[4791]: I0218 00:47:59.524304 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-query-frontend-6d6859c548-qlq5c" podStartSLOduration=1.849074501 podStartE2EDuration="6.524289159s" podCreationTimestamp="2026-02-18 00:47:53 +0000 UTC" firstStartedPulling="2026-02-18 00:47:53.793307641 +0000 UTC m=+815.361320821" lastFinishedPulling="2026-02-18 00:47:58.468522309 +0000 UTC m=+820.036535479" observedRunningTime="2026-02-18 00:47:59.521849114 +0000 UTC m=+821.089862294" watchObservedRunningTime="2026-02-18 00:47:59.524289159 +0000 UTC m=+821.092302329"
Feb 18 00:47:59 crc kubenswrapper[4791]: I0218 00:47:59.529406 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-distributor-5d5548c9f5-t9t9m" podStartSLOduration=2.807013605 podStartE2EDuration="7.529386995s" podCreationTimestamp="2026-02-18 00:47:52 +0000 UTC" firstStartedPulling="2026-02-18 00:47:53.824636975 +0000 UTC m=+815.392650155" lastFinishedPulling="2026-02-18 00:47:58.547010375 +0000 UTC m=+820.115023545" observedRunningTime="2026-02-18 00:47:59.502546619 +0000 UTC m=+821.070559809" watchObservedRunningTime="2026-02-18 00:47:59.529386995 +0000 UTC m=+821.097400175"
Feb 18 00:47:59 crc kubenswrapper[4791]: I0218 00:47:59.537399 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-index-gateway-0" podStartSLOduration=3.045380466 podStartE2EDuration="6.537383121s" podCreationTimestamp="2026-02-18 00:47:53 +0000 UTC" firstStartedPulling="2026-02-18 00:47:55.083785466 +0000 UTC m=+816.651798646" lastFinishedPulling="2026-02-18 00:47:58.575788131 +0000 UTC m=+820.143801301" observedRunningTime="2026-02-18 00:47:59.534671298 +0000 UTC m=+821.102684488" watchObservedRunningTime="2026-02-18 00:47:59.537383121 +0000 UTC m=+821.105396301"
Feb 18 00:48:01 crc kubenswrapper[4791]: I0218 00:48:01.458144 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-f46df4b65-hbtdt" event={"ID":"49f1d891-d5f2-4c56-af09-542c600ddb15","Type":"ContainerStarted","Data":"fa90170f9a35e1aa4a901cf138f0413729cf433e0bf7b58007e1a6d6675b4bde"}
Feb 18 00:48:01 crc kubenswrapper[4791]: I0218 00:48:01.458827 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-f46df4b65-hbtdt"
Feb 18 00:48:01 crc kubenswrapper[4791]: I0218 00:48:01.458849 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-f46df4b65-hbtdt"
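
The pod_startup_latency_tracker entries above carry enough fields to check the bookkeeping by hand: podStartE2EDuration matches watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration matches that same interval minus the image-pull window (lastFinishedPulling minus firstStartedPulling). A minimal sketch in Python, using the logging-loki-ingester-0 figures reported a few entries earlier; this is a reading of the log, not an official kubelet formula, and the timestamps are truncated to microseconds here, so agreement is approximate.

```python
#!/usr/bin/env python3
"""Sanity-check the pod_startup_latency_tracker figures for logging-loki-ingester-0."""
from datetime import datetime

TS = "%Y-%m-%d %H:%M:%S.%f"
created        = datetime.strptime("2026-02-18 00:47:53.000000", TS)  # podCreationTimestamp
first_pulling  = datetime.strptime("2026-02-18 00:47:55.032755", TS)  # firstStartedPulling
last_pulling   = datetime.strptime("2026-02-18 00:47:58.516592", TS)  # lastFinishedPulling
observed_start = datetime.strptime("2026-02-18 00:47:59.447511", TS)  # watchObservedRunningTime

# E2E duration: from pod creation to the first observation of the pod running.
e2e = (observed_start - created).total_seconds()
# SLO duration: the same interval with the image-pull window subtracted.
slo = e2e - (last_pulling - first_pulling).total_seconds()

print(f"podStartE2EDuration ~ {e2e:.6f}s  (log: 6.447511275s)")
print(f"podStartSLOduration ~ {slo:.6f}s  (log: 2.963674802)")
```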
Feb 18 00:48:01 crc kubenswrapper[4791]: I0218 00:48:01.459998 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-f46df4b65-plgfh" event={"ID":"53934528-3cf8-46d2-856a-1d1deef2bd01","Type":"ContainerStarted","Data":"b9525ff12a30b7117b3d768f267ca075582e5ac45d1319f69177cc8f77ac6faa"}
Feb 18 00:48:01 crc kubenswrapper[4791]: I0218 00:48:01.468639 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-f46df4b65-hbtdt"
Feb 18 00:48:01 crc kubenswrapper[4791]: I0218 00:48:01.475795 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-f46df4b65-hbtdt"
Feb 18 00:48:01 crc kubenswrapper[4791]: I0218 00:48:01.484218 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-gateway-f46df4b65-hbtdt" podStartSLOduration=2.6125083829999998 podStartE2EDuration="8.484198981s" podCreationTimestamp="2026-02-18 00:47:53 +0000 UTC" firstStartedPulling="2026-02-18 00:47:54.768217942 +0000 UTC m=+816.336231122" lastFinishedPulling="2026-02-18 00:48:00.63990854 +0000 UTC m=+822.207921720" observedRunningTime="2026-02-18 00:48:01.476289928 +0000 UTC m=+823.044303118" watchObservedRunningTime="2026-02-18 00:48:01.484198981 +0000 UTC m=+823.052212151"
Feb 18 00:48:01 crc kubenswrapper[4791]: I0218 00:48:01.504721 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-gateway-f46df4b65-plgfh" podStartSLOduration=2.366326914 podStartE2EDuration="8.503818845s" podCreationTimestamp="2026-02-18 00:47:53 +0000 UTC" firstStartedPulling="2026-02-18 00:47:54.499633684 +0000 UTC m=+816.067646854" lastFinishedPulling="2026-02-18 00:48:00.637125605 +0000 UTC m=+822.205138785" observedRunningTime="2026-02-18 00:48:01.499281456 +0000 UTC m=+823.067294636" watchObservedRunningTime="2026-02-18 00:48:01.503818845 +0000 UTC m=+823.071832015"
Feb 18 00:48:02 crc kubenswrapper[4791]: I0218 00:48:02.467447 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-f46df4b65-plgfh"
Feb 18 00:48:02 crc kubenswrapper[4791]: I0218 00:48:02.467495 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-f46df4b65-plgfh"
Feb 18 00:48:02 crc kubenswrapper[4791]: I0218 00:48:02.477560 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-f46df4b65-plgfh"
Feb 18 00:48:02 crc kubenswrapper[4791]: I0218 00:48:02.485192 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-f46df4b65-plgfh"
Feb 18 00:48:13 crc kubenswrapper[4791]: I0218 00:48:13.346513 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-distributor-5d5548c9f5-t9t9m"
Feb 18 00:48:13 crc kubenswrapper[4791]: I0218 00:48:13.479691 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-querier-76bf7b6d45-mhn8q"
Feb 18 00:48:13 crc kubenswrapper[4791]: I0218 00:48:13.564496 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-query-frontend-6d6859c548-qlq5c"
Feb 18 00:48:14 crc kubenswrapper[4791]: I0218 00:48:14.538080 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-compactor-0"
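
The run of "SyncLoop (probe)" readiness transitions above can also be confirmed from outside the node by reading the pods' Ready conditions through the API. A minimal sketch with the Kubernetes Python client, assuming a reachable kubeconfig and the `kubernetes` package; this is illustrative only and not part of the kubelet's own machinery.

```python
#!/usr/bin/env python3
"""List the Ready condition for the openshift-logging pods seen above."""
from kubernetes import client, config

config.load_kube_config()  # or config.load_incluster_config() when run inside the cluster
v1 = client.CoreV1Api()

for pod in v1.list_namespaced_pod("openshift-logging").items:
    # Each pod carries a list of conditions; the "Ready" one mirrors the readiness probes.
    ready = next(
        (c.status for c in (pod.status.conditions or []) if c.type == "Ready"),
        "Unknown",
    )
    print(f"{pod.metadata.name:55s} Ready={ready}")
```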
Feb 18 00:48:14 crc kubenswrapper[4791]: I0218 00:48:14.610951 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-index-gateway-0"
Feb 18 00:48:14 crc kubenswrapper[4791]: I0218 00:48:14.786130 4791 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: this instance owns no tokens
Feb 18 00:48:14 crc kubenswrapper[4791]: I0218 00:48:14.786723 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="853d966d-3d6c-4fbf-9c83-62148305ee9e" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503"
Feb 18 00:48:24 crc kubenswrapper[4791]: I0218 00:48:24.776618 4791 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: this instance owns no tokens
Feb 18 00:48:24 crc kubenswrapper[4791]: I0218 00:48:24.777338 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="853d966d-3d6c-4fbf-9c83-62148305ee9e" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503"
Feb 18 00:48:26 crc kubenswrapper[4791]: I0218 00:48:26.799854 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Feb 18 00:48:26 crc kubenswrapper[4791]: I0218 00:48:26.800244 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Feb 18 00:48:26 crc kubenswrapper[4791]: I0218 00:48:26.800293 4791 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv"
Feb 18 00:48:26 crc kubenswrapper[4791]: I0218 00:48:26.801001 4791 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"321face9bfc6a821bbf92e76b694e43c887bc9e42886417d5315477f36b19cca"} pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Feb 18 00:48:26 crc kubenswrapper[4791]: I0218 00:48:26.801060 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" containerID="cri-o://321face9bfc6a821bbf92e76b694e43c887bc9e42886417d5315477f36b19cca" gracePeriod=600
Feb 18 00:48:27 crc kubenswrapper[4791]: I0218 00:48:27.674689 4791 generic.go:334] "Generic (PLEG): container finished" podID="0b31a333-8f95-459c-8135-e91e557c4c85" containerID="321face9bfc6a821bbf92e76b694e43c887bc9e42886417d5315477f36b19cca" exitCode=0
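
The liveness failures above show the kubelet's HTTP prober getting connection refused from http://127.0.0.1:8798/health, after which the container is killed (gracePeriod=600) and restarted. The same check can be repeated by hand from the node; a minimal sketch follows, with the URL copied from the log and the 200-399 success range mirroring the kubelet's HTTP probe rule. It is only reachable from the node itself, since the kubelet probes machine-config-daemon on the host loopback.

```python
#!/usr/bin/env python3
"""Re-run the machine-config-daemon liveness probe by hand (from the node)."""
import urllib.request

URL = "http://127.0.0.1:8798/health"  # taken verbatim from the probe failures above

try:
    with urllib.request.urlopen(URL, timeout=1) as resp:
        # The kubelet treats HTTP 200-399 as a passing probe.
        verdict = "success" if 200 <= resp.status < 400 else "failure"
        print(f"{URL} -> {resp.status} ({verdict})")
except OSError as exc:
    # "connect: connection refused", as in the log, lands here (URLError is an OSError).
    print(f"{URL} -> probe failure: {exc}")
```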
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerDied","Data":"321face9bfc6a821bbf92e76b694e43c887bc9e42886417d5315477f36b19cca"} Feb 18 00:48:27 crc kubenswrapper[4791]: I0218 00:48:27.675004 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerStarted","Data":"35a1be9cfcdfb0c3c05b26f9c95806278509c3206e9bc4177d7fa8a8a51ad178"} Feb 18 00:48:27 crc kubenswrapper[4791]: I0218 00:48:27.675019 4791 scope.go:117] "RemoveContainer" containerID="c4a571b8696e216a6a4c1ca3777a5f8ef873faefd4bf24ea60bd568eedd65c7c" Feb 18 00:48:34 crc kubenswrapper[4791]: I0218 00:48:34.774249 4791 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: waiting for 15s after being ready Feb 18 00:48:34 crc kubenswrapper[4791]: I0218 00:48:34.775110 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="853d966d-3d6c-4fbf-9c83-62148305ee9e" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 18 00:48:44 crc kubenswrapper[4791]: I0218 00:48:44.777472 4791 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: waiting for 15s after being ready Feb 18 00:48:44 crc kubenswrapper[4791]: I0218 00:48:44.778325 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="853d966d-3d6c-4fbf-9c83-62148305ee9e" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 18 00:48:54 crc kubenswrapper[4791]: I0218 00:48:54.774291 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-ingester-0" Feb 18 00:49:12 crc kubenswrapper[4791]: I0218 00:49:12.844034 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/collector-ng59q"] Feb 18 00:49:12 crc kubenswrapper[4791]: I0218 00:49:12.846548 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/collector-ng59q" Feb 18 00:49:12 crc kubenswrapper[4791]: I0218 00:49:12.858204 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-syslog-receiver" Feb 18 00:49:12 crc kubenswrapper[4791]: I0218 00:49:12.858941 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-metrics" Feb 18 00:49:12 crc kubenswrapper[4791]: I0218 00:49:12.859106 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-dockercfg-255rq" Feb 18 00:49:12 crc kubenswrapper[4791]: I0218 00:49:12.873659 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-token" Feb 18 00:49:12 crc kubenswrapper[4791]: I0218 00:49:12.878983 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-config" Feb 18 00:49:12 crc kubenswrapper[4791]: I0218 00:49:12.888145 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-trustbundle" Feb 18 00:49:12 crc kubenswrapper[4791]: I0218 00:49:12.902527 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-ng59q"] Feb 18 00:49:12 crc kubenswrapper[4791]: I0218 00:49:12.948754 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-logging/collector-ng59q"] Feb 18 00:49:12 crc kubenswrapper[4791]: E0218 00:49:12.949438 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[collector-syslog-receiver collector-token config config-openshift-service-cacrt datadir entrypoint kube-api-access-4wxd5 metrics sa-token tmp trusted-ca], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openshift-logging/collector-ng59q" podUID="357774ed-fc4d-4085-9ea8-5c5bcba1c8b1" Feb 18 00:49:12 crc kubenswrapper[4791]: I0218 00:49:12.999274 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-sa-token\") pod \"collector-ng59q\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " pod="openshift-logging/collector-ng59q" Feb 18 00:49:12 crc kubenswrapper[4791]: I0218 00:49:12.999333 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-datadir\") pod \"collector-ng59q\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " pod="openshift-logging/collector-ng59q" Feb 18 00:49:12 crc kubenswrapper[4791]: I0218 00:49:12.999375 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-entrypoint\") pod \"collector-ng59q\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " pod="openshift-logging/collector-ng59q" Feb 18 00:49:12 crc kubenswrapper[4791]: I0218 00:49:12.999410 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-trusted-ca\") pod \"collector-ng59q\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " pod="openshift-logging/collector-ng59q" Feb 18 00:49:12 crc kubenswrapper[4791]: I0218 00:49:12.999582 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-config-openshift-service-cacrt\") pod \"collector-ng59q\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " pod="openshift-logging/collector-ng59q" Feb 18 00:49:12 crc kubenswrapper[4791]: I0218 00:49:12.999781 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-tmp\") pod \"collector-ng59q\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " pod="openshift-logging/collector-ng59q" Feb 18 00:49:12 crc kubenswrapper[4791]: I0218 00:49:12.999828 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4wxd5\" (UniqueName: \"kubernetes.io/projected/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-kube-api-access-4wxd5\") pod \"collector-ng59q\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " pod="openshift-logging/collector-ng59q" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.000073 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-collector-syslog-receiver\") pod \"collector-ng59q\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " pod="openshift-logging/collector-ng59q" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.000106 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-metrics\") pod \"collector-ng59q\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " pod="openshift-logging/collector-ng59q" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.000136 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-collector-token\") pod \"collector-ng59q\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " pod="openshift-logging/collector-ng59q" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.000151 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-config\") pod \"collector-ng59q\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " pod="openshift-logging/collector-ng59q" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.044749 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-ng59q" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.052373 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/collector-ng59q" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.101335 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-trusted-ca\") pod \"collector-ng59q\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " pod="openshift-logging/collector-ng59q" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.101408 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-config-openshift-service-cacrt\") pod \"collector-ng59q\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " pod="openshift-logging/collector-ng59q" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.101502 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-tmp\") pod \"collector-ng59q\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " pod="openshift-logging/collector-ng59q" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.101519 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4wxd5\" (UniqueName: \"kubernetes.io/projected/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-kube-api-access-4wxd5\") pod \"collector-ng59q\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " pod="openshift-logging/collector-ng59q" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.101573 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-collector-syslog-receiver\") pod \"collector-ng59q\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " pod="openshift-logging/collector-ng59q" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.101591 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-metrics\") pod \"collector-ng59q\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " pod="openshift-logging/collector-ng59q" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.101608 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-collector-token\") pod \"collector-ng59q\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " pod="openshift-logging/collector-ng59q" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.101624 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-config\") pod \"collector-ng59q\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " pod="openshift-logging/collector-ng59q" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.101660 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-sa-token\") pod \"collector-ng59q\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " pod="openshift-logging/collector-ng59q" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.101677 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"datadir\" 
(UniqueName: \"kubernetes.io/host-path/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-datadir\") pod \"collector-ng59q\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " pod="openshift-logging/collector-ng59q" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.101705 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-entrypoint\") pod \"collector-ng59q\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " pod="openshift-logging/collector-ng59q" Feb 18 00:49:13 crc kubenswrapper[4791]: E0218 00:49:13.102169 4791 secret.go:188] Couldn't get secret openshift-logging/collector-syslog-receiver: secret "collector-syslog-receiver" not found Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.102207 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-datadir\") pod \"collector-ng59q\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " pod="openshift-logging/collector-ng59q" Feb 18 00:49:13 crc kubenswrapper[4791]: E0218 00:49:13.102244 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-collector-syslog-receiver podName:357774ed-fc4d-4085-9ea8-5c5bcba1c8b1 nodeName:}" failed. No retries permitted until 2026-02-18 00:49:13.602227701 +0000 UTC m=+895.170240871 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "collector-syslog-receiver" (UniqueName: "kubernetes.io/secret/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-collector-syslog-receiver") pod "collector-ng59q" (UID: "357774ed-fc4d-4085-9ea8-5c5bcba1c8b1") : secret "collector-syslog-receiver" not found Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.102528 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-trusted-ca\") pod \"collector-ng59q\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " pod="openshift-logging/collector-ng59q" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.102776 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-entrypoint\") pod \"collector-ng59q\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " pod="openshift-logging/collector-ng59q" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.103022 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-config-openshift-service-cacrt\") pod \"collector-ng59q\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " pod="openshift-logging/collector-ng59q" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.103085 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-config\") pod \"collector-ng59q\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " pod="openshift-logging/collector-ng59q" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.108280 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-collector-token\") pod \"collector-ng59q\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " 
pod="openshift-logging/collector-ng59q" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.114425 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-tmp\") pod \"collector-ng59q\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " pod="openshift-logging/collector-ng59q" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.114625 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-metrics\") pod \"collector-ng59q\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " pod="openshift-logging/collector-ng59q" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.120419 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-sa-token\") pod \"collector-ng59q\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " pod="openshift-logging/collector-ng59q" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.122453 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4wxd5\" (UniqueName: \"kubernetes.io/projected/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-kube-api-access-4wxd5\") pod \"collector-ng59q\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " pod="openshift-logging/collector-ng59q" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.202794 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-config\") pod \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.202907 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-datadir\") pod \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.202954 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-datadir" (OuterVolumeSpecName: "datadir") pod "357774ed-fc4d-4085-9ea8-5c5bcba1c8b1" (UID: "357774ed-fc4d-4085-9ea8-5c5bcba1c8b1"). InnerVolumeSpecName "datadir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.202960 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-entrypoint\") pod \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.203052 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-config-openshift-service-cacrt\") pod \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.203108 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-trusted-ca\") pod \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.203359 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-config" (OuterVolumeSpecName: "config") pod "357774ed-fc4d-4085-9ea8-5c5bcba1c8b1" (UID: "357774ed-fc4d-4085-9ea8-5c5bcba1c8b1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.203414 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-entrypoint" (OuterVolumeSpecName: "entrypoint") pod "357774ed-fc4d-4085-9ea8-5c5bcba1c8b1" (UID: "357774ed-fc4d-4085-9ea8-5c5bcba1c8b1"). InnerVolumeSpecName "entrypoint". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.203499 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-config-openshift-service-cacrt" (OuterVolumeSpecName: "config-openshift-service-cacrt") pod "357774ed-fc4d-4085-9ea8-5c5bcba1c8b1" (UID: "357774ed-fc4d-4085-9ea8-5c5bcba1c8b1"). InnerVolumeSpecName "config-openshift-service-cacrt". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.203637 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "357774ed-fc4d-4085-9ea8-5c5bcba1c8b1" (UID: "357774ed-fc4d-4085-9ea8-5c5bcba1c8b1"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.204424 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.204451 4791 reconciler_common.go:293] "Volume detached for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-datadir\") on node \"crc\" DevicePath \"\"" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.204464 4791 reconciler_common.go:293] "Volume detached for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-entrypoint\") on node \"crc\" DevicePath \"\"" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.204476 4791 reconciler_common.go:293] "Volume detached for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-config-openshift-service-cacrt\") on node \"crc\" DevicePath \"\"" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.204488 4791 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-trusted-ca\") on node \"crc\" DevicePath \"\"" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.304801 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-collector-token\") pod \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.304884 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-metrics\") pod \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.305056 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-sa-token\") pod \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.305087 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-tmp\") pod \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.305108 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4wxd5\" (UniqueName: \"kubernetes.io/projected/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-kube-api-access-4wxd5\") pod \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.307402 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-collector-token" (OuterVolumeSpecName: "collector-token") pod "357774ed-fc4d-4085-9ea8-5c5bcba1c8b1" (UID: "357774ed-fc4d-4085-9ea8-5c5bcba1c8b1"). InnerVolumeSpecName "collector-token". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.307489 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-sa-token" (OuterVolumeSpecName: "sa-token") pod "357774ed-fc4d-4085-9ea8-5c5bcba1c8b1" (UID: "357774ed-fc4d-4085-9ea8-5c5bcba1c8b1"). InnerVolumeSpecName "sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.307628 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-metrics" (OuterVolumeSpecName: "metrics") pod "357774ed-fc4d-4085-9ea8-5c5bcba1c8b1" (UID: "357774ed-fc4d-4085-9ea8-5c5bcba1c8b1"). InnerVolumeSpecName "metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.307845 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-tmp" (OuterVolumeSpecName: "tmp") pod "357774ed-fc4d-4085-9ea8-5c5bcba1c8b1" (UID: "357774ed-fc4d-4085-9ea8-5c5bcba1c8b1"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.318336 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-kube-api-access-4wxd5" (OuterVolumeSpecName: "kube-api-access-4wxd5") pod "357774ed-fc4d-4085-9ea8-5c5bcba1c8b1" (UID: "357774ed-fc4d-4085-9ea8-5c5bcba1c8b1"). InnerVolumeSpecName "kube-api-access-4wxd5". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.406865 4791 reconciler_common.go:293] "Volume detached for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-sa-token\") on node \"crc\" DevicePath \"\"" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.406907 4791 reconciler_common.go:293] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-tmp\") on node \"crc\" DevicePath \"\"" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.406921 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4wxd5\" (UniqueName: \"kubernetes.io/projected/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-kube-api-access-4wxd5\") on node \"crc\" DevicePath \"\"" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.406935 4791 reconciler_common.go:293] "Volume detached for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-collector-token\") on node \"crc\" DevicePath \"\"" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.406948 4791 reconciler_common.go:293] "Volume detached for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-metrics\") on node \"crc\" DevicePath \"\"" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.608780 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-collector-syslog-receiver\") pod \"collector-ng59q\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " pod="openshift-logging/collector-ng59q" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.612646 4791 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-collector-syslog-receiver\") pod \"collector-ng59q\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " pod="openshift-logging/collector-ng59q" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.811457 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-collector-syslog-receiver\") pod \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\" (UID: \"357774ed-fc4d-4085-9ea8-5c5bcba1c8b1\") " Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.813930 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-collector-syslog-receiver" (OuterVolumeSpecName: "collector-syslog-receiver") pod "357774ed-fc4d-4085-9ea8-5c5bcba1c8b1" (UID: "357774ed-fc4d-4085-9ea8-5c5bcba1c8b1"). InnerVolumeSpecName "collector-syslog-receiver". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:49:13 crc kubenswrapper[4791]: I0218 00:49:13.913445 4791 reconciler_common.go:293] "Volume detached for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1-collector-syslog-receiver\") on node \"crc\" DevicePath \"\"" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.051402 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-ng59q" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.099912 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-logging/collector-ng59q"] Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.119473 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-logging/collector-ng59q"] Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.124351 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/collector-qz8dc"] Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.125469 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/collector-qz8dc" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.132389 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-syslog-receiver" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.132654 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-metrics" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.132908 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-dockercfg-255rq" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.133254 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-token" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.133317 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-config" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.137026 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-qz8dc"] Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.141761 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-trustbundle" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.217404 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/48f38038-db15-42ad-a1b0-9a9814248bde-metrics\") pod \"collector-qz8dc\" (UID: \"48f38038-db15-42ad-a1b0-9a9814248bde\") " pod="openshift-logging/collector-qz8dc" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.217450 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/48f38038-db15-42ad-a1b0-9a9814248bde-config-openshift-service-cacrt\") pod \"collector-qz8dc\" (UID: \"48f38038-db15-42ad-a1b0-9a9814248bde\") " pod="openshift-logging/collector-qz8dc" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.217471 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/48f38038-db15-42ad-a1b0-9a9814248bde-entrypoint\") pod \"collector-qz8dc\" (UID: \"48f38038-db15-42ad-a1b0-9a9814248bde\") " pod="openshift-logging/collector-qz8dc" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.217495 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/48f38038-db15-42ad-a1b0-9a9814248bde-collector-syslog-receiver\") pod \"collector-qz8dc\" (UID: \"48f38038-db15-42ad-a1b0-9a9814248bde\") " pod="openshift-logging/collector-qz8dc" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.217742 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/48f38038-db15-42ad-a1b0-9a9814248bde-sa-token\") pod \"collector-qz8dc\" (UID: \"48f38038-db15-42ad-a1b0-9a9814248bde\") " pod="openshift-logging/collector-qz8dc" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.217816 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/48f38038-db15-42ad-a1b0-9a9814248bde-datadir\") pod \"collector-qz8dc\" (UID: 
\"48f38038-db15-42ad-a1b0-9a9814248bde\") " pod="openshift-logging/collector-qz8dc" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.217867 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/48f38038-db15-42ad-a1b0-9a9814248bde-tmp\") pod \"collector-qz8dc\" (UID: \"48f38038-db15-42ad-a1b0-9a9814248bde\") " pod="openshift-logging/collector-qz8dc" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.217923 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/48f38038-db15-42ad-a1b0-9a9814248bde-trusted-ca\") pod \"collector-qz8dc\" (UID: \"48f38038-db15-42ad-a1b0-9a9814248bde\") " pod="openshift-logging/collector-qz8dc" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.217977 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/48f38038-db15-42ad-a1b0-9a9814248bde-collector-token\") pod \"collector-qz8dc\" (UID: \"48f38038-db15-42ad-a1b0-9a9814248bde\") " pod="openshift-logging/collector-qz8dc" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.218061 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/48f38038-db15-42ad-a1b0-9a9814248bde-config\") pod \"collector-qz8dc\" (UID: \"48f38038-db15-42ad-a1b0-9a9814248bde\") " pod="openshift-logging/collector-qz8dc" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.218107 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qsjwr\" (UniqueName: \"kubernetes.io/projected/48f38038-db15-42ad-a1b0-9a9814248bde-kube-api-access-qsjwr\") pod \"collector-qz8dc\" (UID: \"48f38038-db15-42ad-a1b0-9a9814248bde\") " pod="openshift-logging/collector-qz8dc" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.319677 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/48f38038-db15-42ad-a1b0-9a9814248bde-datadir\") pod \"collector-qz8dc\" (UID: \"48f38038-db15-42ad-a1b0-9a9814248bde\") " pod="openshift-logging/collector-qz8dc" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.319824 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/48f38038-db15-42ad-a1b0-9a9814248bde-tmp\") pod \"collector-qz8dc\" (UID: \"48f38038-db15-42ad-a1b0-9a9814248bde\") " pod="openshift-logging/collector-qz8dc" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.319832 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/48f38038-db15-42ad-a1b0-9a9814248bde-datadir\") pod \"collector-qz8dc\" (UID: \"48f38038-db15-42ad-a1b0-9a9814248bde\") " pod="openshift-logging/collector-qz8dc" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.319858 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/48f38038-db15-42ad-a1b0-9a9814248bde-trusted-ca\") pod \"collector-qz8dc\" (UID: \"48f38038-db15-42ad-a1b0-9a9814248bde\") " pod="openshift-logging/collector-qz8dc" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.319973 4791 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/48f38038-db15-42ad-a1b0-9a9814248bde-collector-token\") pod \"collector-qz8dc\" (UID: \"48f38038-db15-42ad-a1b0-9a9814248bde\") " pod="openshift-logging/collector-qz8dc" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.320044 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/48f38038-db15-42ad-a1b0-9a9814248bde-config\") pod \"collector-qz8dc\" (UID: \"48f38038-db15-42ad-a1b0-9a9814248bde\") " pod="openshift-logging/collector-qz8dc" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.320091 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qsjwr\" (UniqueName: \"kubernetes.io/projected/48f38038-db15-42ad-a1b0-9a9814248bde-kube-api-access-qsjwr\") pod \"collector-qz8dc\" (UID: \"48f38038-db15-42ad-a1b0-9a9814248bde\") " pod="openshift-logging/collector-qz8dc" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.320256 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/48f38038-db15-42ad-a1b0-9a9814248bde-metrics\") pod \"collector-qz8dc\" (UID: \"48f38038-db15-42ad-a1b0-9a9814248bde\") " pod="openshift-logging/collector-qz8dc" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.320326 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/48f38038-db15-42ad-a1b0-9a9814248bde-config-openshift-service-cacrt\") pod \"collector-qz8dc\" (UID: \"48f38038-db15-42ad-a1b0-9a9814248bde\") " pod="openshift-logging/collector-qz8dc" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.320376 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/48f38038-db15-42ad-a1b0-9a9814248bde-entrypoint\") pod \"collector-qz8dc\" (UID: \"48f38038-db15-42ad-a1b0-9a9814248bde\") " pod="openshift-logging/collector-qz8dc" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.320440 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/48f38038-db15-42ad-a1b0-9a9814248bde-collector-syslog-receiver\") pod \"collector-qz8dc\" (UID: \"48f38038-db15-42ad-a1b0-9a9814248bde\") " pod="openshift-logging/collector-qz8dc" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.320545 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/48f38038-db15-42ad-a1b0-9a9814248bde-sa-token\") pod \"collector-qz8dc\" (UID: \"48f38038-db15-42ad-a1b0-9a9814248bde\") " pod="openshift-logging/collector-qz8dc" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.321017 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/48f38038-db15-42ad-a1b0-9a9814248bde-config\") pod \"collector-qz8dc\" (UID: \"48f38038-db15-42ad-a1b0-9a9814248bde\") " pod="openshift-logging/collector-qz8dc" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.321041 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/48f38038-db15-42ad-a1b0-9a9814248bde-config-openshift-service-cacrt\") pod \"collector-qz8dc\" (UID: \"48f38038-db15-42ad-a1b0-9a9814248bde\") " 
pod="openshift-logging/collector-qz8dc" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.321049 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/48f38038-db15-42ad-a1b0-9a9814248bde-trusted-ca\") pod \"collector-qz8dc\" (UID: \"48f38038-db15-42ad-a1b0-9a9814248bde\") " pod="openshift-logging/collector-qz8dc" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.321318 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/48f38038-db15-42ad-a1b0-9a9814248bde-entrypoint\") pod \"collector-qz8dc\" (UID: \"48f38038-db15-42ad-a1b0-9a9814248bde\") " pod="openshift-logging/collector-qz8dc" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.325518 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/48f38038-db15-42ad-a1b0-9a9814248bde-tmp\") pod \"collector-qz8dc\" (UID: \"48f38038-db15-42ad-a1b0-9a9814248bde\") " pod="openshift-logging/collector-qz8dc" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.326594 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/48f38038-db15-42ad-a1b0-9a9814248bde-metrics\") pod \"collector-qz8dc\" (UID: \"48f38038-db15-42ad-a1b0-9a9814248bde\") " pod="openshift-logging/collector-qz8dc" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.330705 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/48f38038-db15-42ad-a1b0-9a9814248bde-collector-token\") pod \"collector-qz8dc\" (UID: \"48f38038-db15-42ad-a1b0-9a9814248bde\") " pod="openshift-logging/collector-qz8dc" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.331754 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/48f38038-db15-42ad-a1b0-9a9814248bde-collector-syslog-receiver\") pod \"collector-qz8dc\" (UID: \"48f38038-db15-42ad-a1b0-9a9814248bde\") " pod="openshift-logging/collector-qz8dc" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.339727 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qsjwr\" (UniqueName: \"kubernetes.io/projected/48f38038-db15-42ad-a1b0-9a9814248bde-kube-api-access-qsjwr\") pod \"collector-qz8dc\" (UID: \"48f38038-db15-42ad-a1b0-9a9814248bde\") " pod="openshift-logging/collector-qz8dc" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.343389 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/48f38038-db15-42ad-a1b0-9a9814248bde-sa-token\") pod \"collector-qz8dc\" (UID: \"48f38038-db15-42ad-a1b0-9a9814248bde\") " pod="openshift-logging/collector-qz8dc" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.472952 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/collector-qz8dc" Feb 18 00:49:14 crc kubenswrapper[4791]: I0218 00:49:14.732460 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-qz8dc"] Feb 18 00:49:15 crc kubenswrapper[4791]: I0218 00:49:15.058536 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/collector-qz8dc" event={"ID":"48f38038-db15-42ad-a1b0-9a9814248bde","Type":"ContainerStarted","Data":"41323fe92b94dceb6a82a1f636fe1e6b690101b100a0bdf250f5500ffe39e7a9"} Feb 18 00:49:15 crc kubenswrapper[4791]: I0218 00:49:15.069795 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="357774ed-fc4d-4085-9ea8-5c5bcba1c8b1" path="/var/lib/kubelet/pods/357774ed-fc4d-4085-9ea8-5c5bcba1c8b1/volumes" Feb 18 00:49:23 crc kubenswrapper[4791]: I0218 00:49:23.129298 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/collector-qz8dc" event={"ID":"48f38038-db15-42ad-a1b0-9a9814248bde","Type":"ContainerStarted","Data":"76d855a114ce0de6e3db12b8403d029b6005068e4bcc4350068c69e4bfb7027f"} Feb 18 00:49:23 crc kubenswrapper[4791]: I0218 00:49:23.155184 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/collector-qz8dc" podStartSLOduration=1.150311998 podStartE2EDuration="9.155168333s" podCreationTimestamp="2026-02-18 00:49:14 +0000 UTC" firstStartedPulling="2026-02-18 00:49:14.737838421 +0000 UTC m=+896.305851591" lastFinishedPulling="2026-02-18 00:49:22.742694756 +0000 UTC m=+904.310707926" observedRunningTime="2026-02-18 00:49:23.146826546 +0000 UTC m=+904.714839716" watchObservedRunningTime="2026-02-18 00:49:23.155168333 +0000 UTC m=+904.723181503" Feb 18 00:49:27 crc kubenswrapper[4791]: I0218 00:49:27.775429 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-546v8"] Feb 18 00:49:27 crc kubenswrapper[4791]: I0218 00:49:27.790816 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-546v8" Feb 18 00:49:27 crc kubenswrapper[4791]: I0218 00:49:27.829625 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-546v8"] Feb 18 00:49:27 crc kubenswrapper[4791]: I0218 00:49:27.900455 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63538587-5b26-4e2b-be28-14577e926411-utilities\") pod \"redhat-marketplace-546v8\" (UID: \"63538587-5b26-4e2b-be28-14577e926411\") " pod="openshift-marketplace/redhat-marketplace-546v8" Feb 18 00:49:27 crc kubenswrapper[4791]: I0218 00:49:27.900507 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63538587-5b26-4e2b-be28-14577e926411-catalog-content\") pod \"redhat-marketplace-546v8\" (UID: \"63538587-5b26-4e2b-be28-14577e926411\") " pod="openshift-marketplace/redhat-marketplace-546v8" Feb 18 00:49:27 crc kubenswrapper[4791]: I0218 00:49:27.900617 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xj79x\" (UniqueName: \"kubernetes.io/projected/63538587-5b26-4e2b-be28-14577e926411-kube-api-access-xj79x\") pod \"redhat-marketplace-546v8\" (UID: \"63538587-5b26-4e2b-be28-14577e926411\") " pod="openshift-marketplace/redhat-marketplace-546v8" Feb 18 00:49:28 crc kubenswrapper[4791]: I0218 00:49:28.003183 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xj79x\" (UniqueName: \"kubernetes.io/projected/63538587-5b26-4e2b-be28-14577e926411-kube-api-access-xj79x\") pod \"redhat-marketplace-546v8\" (UID: \"63538587-5b26-4e2b-be28-14577e926411\") " pod="openshift-marketplace/redhat-marketplace-546v8" Feb 18 00:49:28 crc kubenswrapper[4791]: I0218 00:49:28.003469 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63538587-5b26-4e2b-be28-14577e926411-utilities\") pod \"redhat-marketplace-546v8\" (UID: \"63538587-5b26-4e2b-be28-14577e926411\") " pod="openshift-marketplace/redhat-marketplace-546v8" Feb 18 00:49:28 crc kubenswrapper[4791]: I0218 00:49:28.004560 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63538587-5b26-4e2b-be28-14577e926411-catalog-content\") pod \"redhat-marketplace-546v8\" (UID: \"63538587-5b26-4e2b-be28-14577e926411\") " pod="openshift-marketplace/redhat-marketplace-546v8" Feb 18 00:49:28 crc kubenswrapper[4791]: I0218 00:49:28.004506 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63538587-5b26-4e2b-be28-14577e926411-utilities\") pod \"redhat-marketplace-546v8\" (UID: \"63538587-5b26-4e2b-be28-14577e926411\") " pod="openshift-marketplace/redhat-marketplace-546v8" Feb 18 00:49:28 crc kubenswrapper[4791]: I0218 00:49:28.004917 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63538587-5b26-4e2b-be28-14577e926411-catalog-content\") pod \"redhat-marketplace-546v8\" (UID: \"63538587-5b26-4e2b-be28-14577e926411\") " pod="openshift-marketplace/redhat-marketplace-546v8" Feb 18 00:49:28 crc kubenswrapper[4791]: I0218 00:49:28.026764 4791 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-xj79x\" (UniqueName: \"kubernetes.io/projected/63538587-5b26-4e2b-be28-14577e926411-kube-api-access-xj79x\") pod \"redhat-marketplace-546v8\" (UID: \"63538587-5b26-4e2b-be28-14577e926411\") " pod="openshift-marketplace/redhat-marketplace-546v8" Feb 18 00:49:28 crc kubenswrapper[4791]: I0218 00:49:28.125945 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-546v8" Feb 18 00:49:28 crc kubenswrapper[4791]: I0218 00:49:28.583800 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-546v8"] Feb 18 00:49:28 crc kubenswrapper[4791]: W0218 00:49:28.587421 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod63538587_5b26_4e2b_be28_14577e926411.slice/crio-8a38cc710b817dabe633c7c17d4786f60bf0cf0180293a18e962171870d3efac WatchSource:0}: Error finding container 8a38cc710b817dabe633c7c17d4786f60bf0cf0180293a18e962171870d3efac: Status 404 returned error can't find the container with id 8a38cc710b817dabe633c7c17d4786f60bf0cf0180293a18e962171870d3efac Feb 18 00:49:29 crc kubenswrapper[4791]: I0218 00:49:29.196580 4791 generic.go:334] "Generic (PLEG): container finished" podID="63538587-5b26-4e2b-be28-14577e926411" containerID="8a117f49c3055e9d997e0d55c97658b0498e59b0e820390260b156ceda4ac5d6" exitCode=0 Feb 18 00:49:29 crc kubenswrapper[4791]: I0218 00:49:29.196975 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-546v8" event={"ID":"63538587-5b26-4e2b-be28-14577e926411","Type":"ContainerDied","Data":"8a117f49c3055e9d997e0d55c97658b0498e59b0e820390260b156ceda4ac5d6"} Feb 18 00:49:29 crc kubenswrapper[4791]: I0218 00:49:29.197068 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-546v8" event={"ID":"63538587-5b26-4e2b-be28-14577e926411","Type":"ContainerStarted","Data":"8a38cc710b817dabe633c7c17d4786f60bf0cf0180293a18e962171870d3efac"} Feb 18 00:49:31 crc kubenswrapper[4791]: I0218 00:49:31.215223 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-546v8" event={"ID":"63538587-5b26-4e2b-be28-14577e926411","Type":"ContainerStarted","Data":"95b3d370d4414fc67d218cc882ffda4b92844b609d81386f8f8b0c9aad45130e"} Feb 18 00:49:32 crc kubenswrapper[4791]: I0218 00:49:32.226728 4791 generic.go:334] "Generic (PLEG): container finished" podID="63538587-5b26-4e2b-be28-14577e926411" containerID="95b3d370d4414fc67d218cc882ffda4b92844b609d81386f8f8b0c9aad45130e" exitCode=0 Feb 18 00:49:32 crc kubenswrapper[4791]: I0218 00:49:32.226868 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-546v8" event={"ID":"63538587-5b26-4e2b-be28-14577e926411","Type":"ContainerDied","Data":"95b3d370d4414fc67d218cc882ffda4b92844b609d81386f8f8b0c9aad45130e"} Feb 18 00:49:33 crc kubenswrapper[4791]: I0218 00:49:33.239306 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-546v8" event={"ID":"63538587-5b26-4e2b-be28-14577e926411","Type":"ContainerStarted","Data":"86c8197341ecb0ca78793bb163799a9f6b84cfa628f0736d220be4666e49e860"} Feb 18 00:49:33 crc kubenswrapper[4791]: I0218 00:49:33.267582 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-546v8" podStartSLOduration=2.820145473 
podStartE2EDuration="6.267567552s" podCreationTimestamp="2026-02-18 00:49:27 +0000 UTC" firstStartedPulling="2026-02-18 00:49:29.201849808 +0000 UTC m=+910.769862978" lastFinishedPulling="2026-02-18 00:49:32.649271887 +0000 UTC m=+914.217285057" observedRunningTime="2026-02-18 00:49:33.263835687 +0000 UTC m=+914.831848867" watchObservedRunningTime="2026-02-18 00:49:33.267567552 +0000 UTC m=+914.835580722" Feb 18 00:49:38 crc kubenswrapper[4791]: I0218 00:49:38.126959 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-546v8" Feb 18 00:49:38 crc kubenswrapper[4791]: I0218 00:49:38.127476 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-546v8" Feb 18 00:49:38 crc kubenswrapper[4791]: I0218 00:49:38.166218 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-546v8" Feb 18 00:49:38 crc kubenswrapper[4791]: I0218 00:49:38.328177 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-546v8" Feb 18 00:49:38 crc kubenswrapper[4791]: I0218 00:49:38.395727 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-546v8"] Feb 18 00:49:40 crc kubenswrapper[4791]: I0218 00:49:40.292345 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-546v8" podUID="63538587-5b26-4e2b-be28-14577e926411" containerName="registry-server" containerID="cri-o://86c8197341ecb0ca78793bb163799a9f6b84cfa628f0736d220be4666e49e860" gracePeriod=2 Feb 18 00:49:40 crc kubenswrapper[4791]: I0218 00:49:40.757409 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-546v8" Feb 18 00:49:40 crc kubenswrapper[4791]: I0218 00:49:40.934907 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xj79x\" (UniqueName: \"kubernetes.io/projected/63538587-5b26-4e2b-be28-14577e926411-kube-api-access-xj79x\") pod \"63538587-5b26-4e2b-be28-14577e926411\" (UID: \"63538587-5b26-4e2b-be28-14577e926411\") " Feb 18 00:49:40 crc kubenswrapper[4791]: I0218 00:49:40.935178 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63538587-5b26-4e2b-be28-14577e926411-catalog-content\") pod \"63538587-5b26-4e2b-be28-14577e926411\" (UID: \"63538587-5b26-4e2b-be28-14577e926411\") " Feb 18 00:49:40 crc kubenswrapper[4791]: I0218 00:49:40.935264 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63538587-5b26-4e2b-be28-14577e926411-utilities\") pod \"63538587-5b26-4e2b-be28-14577e926411\" (UID: \"63538587-5b26-4e2b-be28-14577e926411\") " Feb 18 00:49:40 crc kubenswrapper[4791]: I0218 00:49:40.936052 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/63538587-5b26-4e2b-be28-14577e926411-utilities" (OuterVolumeSpecName: "utilities") pod "63538587-5b26-4e2b-be28-14577e926411" (UID: "63538587-5b26-4e2b-be28-14577e926411"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:49:40 crc kubenswrapper[4791]: I0218 00:49:40.941186 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63538587-5b26-4e2b-be28-14577e926411-kube-api-access-xj79x" (OuterVolumeSpecName: "kube-api-access-xj79x") pod "63538587-5b26-4e2b-be28-14577e926411" (UID: "63538587-5b26-4e2b-be28-14577e926411"). InnerVolumeSpecName "kube-api-access-xj79x". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:49:40 crc kubenswrapper[4791]: I0218 00:49:40.967847 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/63538587-5b26-4e2b-be28-14577e926411-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "63538587-5b26-4e2b-be28-14577e926411" (UID: "63538587-5b26-4e2b-be28-14577e926411"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:49:41 crc kubenswrapper[4791]: I0218 00:49:41.037265 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63538587-5b26-4e2b-be28-14577e926411-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 00:49:41 crc kubenswrapper[4791]: I0218 00:49:41.037310 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63538587-5b26-4e2b-be28-14577e926411-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 00:49:41 crc kubenswrapper[4791]: I0218 00:49:41.037323 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xj79x\" (UniqueName: \"kubernetes.io/projected/63538587-5b26-4e2b-be28-14577e926411-kube-api-access-xj79x\") on node \"crc\" DevicePath \"\"" Feb 18 00:49:41 crc kubenswrapper[4791]: I0218 00:49:41.301675 4791 generic.go:334] "Generic (PLEG): container finished" podID="63538587-5b26-4e2b-be28-14577e926411" containerID="86c8197341ecb0ca78793bb163799a9f6b84cfa628f0736d220be4666e49e860" exitCode=0 Feb 18 00:49:41 crc kubenswrapper[4791]: I0218 00:49:41.301713 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-546v8" event={"ID":"63538587-5b26-4e2b-be28-14577e926411","Type":"ContainerDied","Data":"86c8197341ecb0ca78793bb163799a9f6b84cfa628f0736d220be4666e49e860"} Feb 18 00:49:41 crc kubenswrapper[4791]: I0218 00:49:41.301732 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-546v8" Feb 18 00:49:41 crc kubenswrapper[4791]: I0218 00:49:41.301744 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-546v8" event={"ID":"63538587-5b26-4e2b-be28-14577e926411","Type":"ContainerDied","Data":"8a38cc710b817dabe633c7c17d4786f60bf0cf0180293a18e962171870d3efac"} Feb 18 00:49:41 crc kubenswrapper[4791]: I0218 00:49:41.301762 4791 scope.go:117] "RemoveContainer" containerID="86c8197341ecb0ca78793bb163799a9f6b84cfa628f0736d220be4666e49e860" Feb 18 00:49:41 crc kubenswrapper[4791]: I0218 00:49:41.325280 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-546v8"] Feb 18 00:49:41 crc kubenswrapper[4791]: I0218 00:49:41.331191 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-546v8"] Feb 18 00:49:41 crc kubenswrapper[4791]: I0218 00:49:41.331600 4791 scope.go:117] "RemoveContainer" containerID="95b3d370d4414fc67d218cc882ffda4b92844b609d81386f8f8b0c9aad45130e" Feb 18 00:49:41 crc kubenswrapper[4791]: I0218 00:49:41.347723 4791 scope.go:117] "RemoveContainer" containerID="8a117f49c3055e9d997e0d55c97658b0498e59b0e820390260b156ceda4ac5d6" Feb 18 00:49:41 crc kubenswrapper[4791]: I0218 00:49:41.371509 4791 scope.go:117] "RemoveContainer" containerID="86c8197341ecb0ca78793bb163799a9f6b84cfa628f0736d220be4666e49e860" Feb 18 00:49:41 crc kubenswrapper[4791]: E0218 00:49:41.372036 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"86c8197341ecb0ca78793bb163799a9f6b84cfa628f0736d220be4666e49e860\": container with ID starting with 86c8197341ecb0ca78793bb163799a9f6b84cfa628f0736d220be4666e49e860 not found: ID does not exist" containerID="86c8197341ecb0ca78793bb163799a9f6b84cfa628f0736d220be4666e49e860" Feb 18 00:49:41 crc kubenswrapper[4791]: I0218 00:49:41.372070 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86c8197341ecb0ca78793bb163799a9f6b84cfa628f0736d220be4666e49e860"} err="failed to get container status \"86c8197341ecb0ca78793bb163799a9f6b84cfa628f0736d220be4666e49e860\": rpc error: code = NotFound desc = could not find container \"86c8197341ecb0ca78793bb163799a9f6b84cfa628f0736d220be4666e49e860\": container with ID starting with 86c8197341ecb0ca78793bb163799a9f6b84cfa628f0736d220be4666e49e860 not found: ID does not exist" Feb 18 00:49:41 crc kubenswrapper[4791]: I0218 00:49:41.372089 4791 scope.go:117] "RemoveContainer" containerID="95b3d370d4414fc67d218cc882ffda4b92844b609d81386f8f8b0c9aad45130e" Feb 18 00:49:41 crc kubenswrapper[4791]: E0218 00:49:41.372352 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"95b3d370d4414fc67d218cc882ffda4b92844b609d81386f8f8b0c9aad45130e\": container with ID starting with 95b3d370d4414fc67d218cc882ffda4b92844b609d81386f8f8b0c9aad45130e not found: ID does not exist" containerID="95b3d370d4414fc67d218cc882ffda4b92844b609d81386f8f8b0c9aad45130e" Feb 18 00:49:41 crc kubenswrapper[4791]: I0218 00:49:41.372376 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"95b3d370d4414fc67d218cc882ffda4b92844b609d81386f8f8b0c9aad45130e"} err="failed to get container status \"95b3d370d4414fc67d218cc882ffda4b92844b609d81386f8f8b0c9aad45130e\": rpc error: code = NotFound desc = could not find 
container \"95b3d370d4414fc67d218cc882ffda4b92844b609d81386f8f8b0c9aad45130e\": container with ID starting with 95b3d370d4414fc67d218cc882ffda4b92844b609d81386f8f8b0c9aad45130e not found: ID does not exist" Feb 18 00:49:41 crc kubenswrapper[4791]: I0218 00:49:41.372390 4791 scope.go:117] "RemoveContainer" containerID="8a117f49c3055e9d997e0d55c97658b0498e59b0e820390260b156ceda4ac5d6" Feb 18 00:49:41 crc kubenswrapper[4791]: E0218 00:49:41.372649 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8a117f49c3055e9d997e0d55c97658b0498e59b0e820390260b156ceda4ac5d6\": container with ID starting with 8a117f49c3055e9d997e0d55c97658b0498e59b0e820390260b156ceda4ac5d6 not found: ID does not exist" containerID="8a117f49c3055e9d997e0d55c97658b0498e59b0e820390260b156ceda4ac5d6" Feb 18 00:49:41 crc kubenswrapper[4791]: I0218 00:49:41.372668 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8a117f49c3055e9d997e0d55c97658b0498e59b0e820390260b156ceda4ac5d6"} err="failed to get container status \"8a117f49c3055e9d997e0d55c97658b0498e59b0e820390260b156ceda4ac5d6\": rpc error: code = NotFound desc = could not find container \"8a117f49c3055e9d997e0d55c97658b0498e59b0e820390260b156ceda4ac5d6\": container with ID starting with 8a117f49c3055e9d997e0d55c97658b0498e59b0e820390260b156ceda4ac5d6 not found: ID does not exist" Feb 18 00:49:43 crc kubenswrapper[4791]: I0218 00:49:43.071076 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="63538587-5b26-4e2b-be28-14577e926411" path="/var/lib/kubelet/pods/63538587-5b26-4e2b-be28-14577e926411/volumes" Feb 18 00:49:51 crc kubenswrapper[4791]: I0218 00:49:51.560890 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd"] Feb 18 00:49:51 crc kubenswrapper[4791]: E0218 00:49:51.561664 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63538587-5b26-4e2b-be28-14577e926411" containerName="extract-utilities" Feb 18 00:49:51 crc kubenswrapper[4791]: I0218 00:49:51.561677 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="63538587-5b26-4e2b-be28-14577e926411" containerName="extract-utilities" Feb 18 00:49:51 crc kubenswrapper[4791]: E0218 00:49:51.561697 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63538587-5b26-4e2b-be28-14577e926411" containerName="extract-content" Feb 18 00:49:51 crc kubenswrapper[4791]: I0218 00:49:51.561704 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="63538587-5b26-4e2b-be28-14577e926411" containerName="extract-content" Feb 18 00:49:51 crc kubenswrapper[4791]: E0218 00:49:51.561714 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63538587-5b26-4e2b-be28-14577e926411" containerName="registry-server" Feb 18 00:49:51 crc kubenswrapper[4791]: I0218 00:49:51.561720 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="63538587-5b26-4e2b-be28-14577e926411" containerName="registry-server" Feb 18 00:49:51 crc kubenswrapper[4791]: I0218 00:49:51.561849 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="63538587-5b26-4e2b-be28-14577e926411" containerName="registry-server" Feb 18 00:49:51 crc kubenswrapper[4791]: I0218 00:49:51.562828 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd" Feb 18 00:49:51 crc kubenswrapper[4791]: I0218 00:49:51.565109 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Feb 18 00:49:51 crc kubenswrapper[4791]: I0218 00:49:51.572931 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd"] Feb 18 00:49:51 crc kubenswrapper[4791]: I0218 00:49:51.662879 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4f6571ac-7426-452e-93f0-6f6d82d7bece-util\") pod \"f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd\" (UID: \"4f6571ac-7426-452e-93f0-6f6d82d7bece\") " pod="openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd" Feb 18 00:49:51 crc kubenswrapper[4791]: I0218 00:49:51.662974 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4f6571ac-7426-452e-93f0-6f6d82d7bece-bundle\") pod \"f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd\" (UID: \"4f6571ac-7426-452e-93f0-6f6d82d7bece\") " pod="openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd" Feb 18 00:49:51 crc kubenswrapper[4791]: I0218 00:49:51.663071 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-87rqr\" (UniqueName: \"kubernetes.io/projected/4f6571ac-7426-452e-93f0-6f6d82d7bece-kube-api-access-87rqr\") pod \"f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd\" (UID: \"4f6571ac-7426-452e-93f0-6f6d82d7bece\") " pod="openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd" Feb 18 00:49:51 crc kubenswrapper[4791]: I0218 00:49:51.764269 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4f6571ac-7426-452e-93f0-6f6d82d7bece-bundle\") pod \"f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd\" (UID: \"4f6571ac-7426-452e-93f0-6f6d82d7bece\") " pod="openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd" Feb 18 00:49:51 crc kubenswrapper[4791]: I0218 00:49:51.764393 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-87rqr\" (UniqueName: \"kubernetes.io/projected/4f6571ac-7426-452e-93f0-6f6d82d7bece-kube-api-access-87rqr\") pod \"f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd\" (UID: \"4f6571ac-7426-452e-93f0-6f6d82d7bece\") " pod="openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd" Feb 18 00:49:51 crc kubenswrapper[4791]: I0218 00:49:51.764468 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4f6571ac-7426-452e-93f0-6f6d82d7bece-util\") pod \"f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd\" (UID: \"4f6571ac-7426-452e-93f0-6f6d82d7bece\") " pod="openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd" Feb 18 00:49:51 crc kubenswrapper[4791]: I0218 00:49:51.765231 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/4f6571ac-7426-452e-93f0-6f6d82d7bece-bundle\") pod \"f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd\" (UID: \"4f6571ac-7426-452e-93f0-6f6d82d7bece\") " pod="openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd" Feb 18 00:49:51 crc kubenswrapper[4791]: I0218 00:49:51.765256 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4f6571ac-7426-452e-93f0-6f6d82d7bece-util\") pod \"f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd\" (UID: \"4f6571ac-7426-452e-93f0-6f6d82d7bece\") " pod="openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd" Feb 18 00:49:51 crc kubenswrapper[4791]: I0218 00:49:51.781038 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-87rqr\" (UniqueName: \"kubernetes.io/projected/4f6571ac-7426-452e-93f0-6f6d82d7bece-kube-api-access-87rqr\") pod \"f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd\" (UID: \"4f6571ac-7426-452e-93f0-6f6d82d7bece\") " pod="openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd" Feb 18 00:49:51 crc kubenswrapper[4791]: I0218 00:49:51.879476 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd" Feb 18 00:49:52 crc kubenswrapper[4791]: I0218 00:49:52.208550 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd"] Feb 18 00:49:52 crc kubenswrapper[4791]: I0218 00:49:52.380976 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd" event={"ID":"4f6571ac-7426-452e-93f0-6f6d82d7bece","Type":"ContainerStarted","Data":"ae33b88721d842b5b34d3a834d001c4c5574c12fdcbdf8600ac7cfd5d1ad28a1"} Feb 18 00:49:52 crc kubenswrapper[4791]: I0218 00:49:52.381050 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd" event={"ID":"4f6571ac-7426-452e-93f0-6f6d82d7bece","Type":"ContainerStarted","Data":"36704a7fcd5161e1a11f486d39375674637f36e93be7ebef1141889e18e9949e"} Feb 18 00:49:53 crc kubenswrapper[4791]: I0218 00:49:53.388551 4791 generic.go:334] "Generic (PLEG): container finished" podID="4f6571ac-7426-452e-93f0-6f6d82d7bece" containerID="ae33b88721d842b5b34d3a834d001c4c5574c12fdcbdf8600ac7cfd5d1ad28a1" exitCode=0 Feb 18 00:49:53 crc kubenswrapper[4791]: I0218 00:49:53.388639 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd" event={"ID":"4f6571ac-7426-452e-93f0-6f6d82d7bece","Type":"ContainerDied","Data":"ae33b88721d842b5b34d3a834d001c4c5574c12fdcbdf8600ac7cfd5d1ad28a1"} Feb 18 00:49:55 crc kubenswrapper[4791]: I0218 00:49:55.404080 4791 generic.go:334] "Generic (PLEG): container finished" podID="4f6571ac-7426-452e-93f0-6f6d82d7bece" containerID="caab8cc7cbd0ae339dd1d5fa4d21cfe06f5cb43821df2d7ffee6d25ecb1b06fb" exitCode=0 Feb 18 00:49:55 crc kubenswrapper[4791]: I0218 00:49:55.404150 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd" 
event={"ID":"4f6571ac-7426-452e-93f0-6f6d82d7bece","Type":"ContainerDied","Data":"caab8cc7cbd0ae339dd1d5fa4d21cfe06f5cb43821df2d7ffee6d25ecb1b06fb"} Feb 18 00:49:56 crc kubenswrapper[4791]: I0218 00:49:56.425979 4791 generic.go:334] "Generic (PLEG): container finished" podID="4f6571ac-7426-452e-93f0-6f6d82d7bece" containerID="798c2247912310c130a277cd1356eb2f1ce71d660d36ec6926c2456a8f317633" exitCode=0 Feb 18 00:49:56 crc kubenswrapper[4791]: I0218 00:49:56.426037 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd" event={"ID":"4f6571ac-7426-452e-93f0-6f6d82d7bece","Type":"ContainerDied","Data":"798c2247912310c130a277cd1356eb2f1ce71d660d36ec6926c2456a8f317633"} Feb 18 00:49:57 crc kubenswrapper[4791]: I0218 00:49:57.699779 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd" Feb 18 00:49:57 crc kubenswrapper[4791]: I0218 00:49:57.749577 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-87rqr\" (UniqueName: \"kubernetes.io/projected/4f6571ac-7426-452e-93f0-6f6d82d7bece-kube-api-access-87rqr\") pod \"4f6571ac-7426-452e-93f0-6f6d82d7bece\" (UID: \"4f6571ac-7426-452e-93f0-6f6d82d7bece\") " Feb 18 00:49:57 crc kubenswrapper[4791]: I0218 00:49:57.749940 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4f6571ac-7426-452e-93f0-6f6d82d7bece-bundle\") pod \"4f6571ac-7426-452e-93f0-6f6d82d7bece\" (UID: \"4f6571ac-7426-452e-93f0-6f6d82d7bece\") " Feb 18 00:49:57 crc kubenswrapper[4791]: I0218 00:49:57.750028 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4f6571ac-7426-452e-93f0-6f6d82d7bece-util\") pod \"4f6571ac-7426-452e-93f0-6f6d82d7bece\" (UID: \"4f6571ac-7426-452e-93f0-6f6d82d7bece\") " Feb 18 00:49:57 crc kubenswrapper[4791]: I0218 00:49:57.750605 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4f6571ac-7426-452e-93f0-6f6d82d7bece-bundle" (OuterVolumeSpecName: "bundle") pod "4f6571ac-7426-452e-93f0-6f6d82d7bece" (UID: "4f6571ac-7426-452e-93f0-6f6d82d7bece"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:49:57 crc kubenswrapper[4791]: I0218 00:49:57.766353 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f6571ac-7426-452e-93f0-6f6d82d7bece-kube-api-access-87rqr" (OuterVolumeSpecName: "kube-api-access-87rqr") pod "4f6571ac-7426-452e-93f0-6f6d82d7bece" (UID: "4f6571ac-7426-452e-93f0-6f6d82d7bece"). InnerVolumeSpecName "kube-api-access-87rqr". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:49:57 crc kubenswrapper[4791]: I0218 00:49:57.772476 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4f6571ac-7426-452e-93f0-6f6d82d7bece-util" (OuterVolumeSpecName: "util") pod "4f6571ac-7426-452e-93f0-6f6d82d7bece" (UID: "4f6571ac-7426-452e-93f0-6f6d82d7bece"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:49:57 crc kubenswrapper[4791]: I0218 00:49:57.851495 4791 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4f6571ac-7426-452e-93f0-6f6d82d7bece-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:49:57 crc kubenswrapper[4791]: I0218 00:49:57.851527 4791 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4f6571ac-7426-452e-93f0-6f6d82d7bece-util\") on node \"crc\" DevicePath \"\"" Feb 18 00:49:57 crc kubenswrapper[4791]: I0218 00:49:57.851537 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-87rqr\" (UniqueName: \"kubernetes.io/projected/4f6571ac-7426-452e-93f0-6f6d82d7bece-kube-api-access-87rqr\") on node \"crc\" DevicePath \"\"" Feb 18 00:49:58 crc kubenswrapper[4791]: I0218 00:49:58.438444 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd" event={"ID":"4f6571ac-7426-452e-93f0-6f6d82d7bece","Type":"ContainerDied","Data":"36704a7fcd5161e1a11f486d39375674637f36e93be7ebef1141889e18e9949e"} Feb 18 00:49:58 crc kubenswrapper[4791]: I0218 00:49:58.438487 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="36704a7fcd5161e1a11f486d39375674637f36e93be7ebef1141889e18e9949e" Feb 18 00:49:58 crc kubenswrapper[4791]: I0218 00:49:58.438501 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd" Feb 18 00:50:03 crc kubenswrapper[4791]: I0218 00:50:03.327786 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-694c9596b7-s67t6"] Feb 18 00:50:03 crc kubenswrapper[4791]: E0218 00:50:03.328629 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f6571ac-7426-452e-93f0-6f6d82d7bece" containerName="extract" Feb 18 00:50:03 crc kubenswrapper[4791]: I0218 00:50:03.328644 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f6571ac-7426-452e-93f0-6f6d82d7bece" containerName="extract" Feb 18 00:50:03 crc kubenswrapper[4791]: E0218 00:50:03.328667 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f6571ac-7426-452e-93f0-6f6d82d7bece" containerName="util" Feb 18 00:50:03 crc kubenswrapper[4791]: I0218 00:50:03.328675 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f6571ac-7426-452e-93f0-6f6d82d7bece" containerName="util" Feb 18 00:50:03 crc kubenswrapper[4791]: E0218 00:50:03.328690 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f6571ac-7426-452e-93f0-6f6d82d7bece" containerName="pull" Feb 18 00:50:03 crc kubenswrapper[4791]: I0218 00:50:03.328698 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f6571ac-7426-452e-93f0-6f6d82d7bece" containerName="pull" Feb 18 00:50:03 crc kubenswrapper[4791]: I0218 00:50:03.328849 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f6571ac-7426-452e-93f0-6f6d82d7bece" containerName="extract" Feb 18 00:50:03 crc kubenswrapper[4791]: I0218 00:50:03.329441 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-694c9596b7-s67t6" Feb 18 00:50:03 crc kubenswrapper[4791]: I0218 00:50:03.331944 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Feb 18 00:50:03 crc kubenswrapper[4791]: I0218 00:50:03.332113 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Feb 18 00:50:03 crc kubenswrapper[4791]: I0218 00:50:03.332581 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-8bm5k" Feb 18 00:50:03 crc kubenswrapper[4791]: I0218 00:50:03.343794 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-694c9596b7-s67t6"] Feb 18 00:50:03 crc kubenswrapper[4791]: I0218 00:50:03.352037 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cr786\" (UniqueName: \"kubernetes.io/projected/3ed4fbe3-f078-4fd2-91fd-ba4b154c34c5-kube-api-access-cr786\") pod \"nmstate-operator-694c9596b7-s67t6\" (UID: \"3ed4fbe3-f078-4fd2-91fd-ba4b154c34c5\") " pod="openshift-nmstate/nmstate-operator-694c9596b7-s67t6" Feb 18 00:50:03 crc kubenswrapper[4791]: I0218 00:50:03.453545 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cr786\" (UniqueName: \"kubernetes.io/projected/3ed4fbe3-f078-4fd2-91fd-ba4b154c34c5-kube-api-access-cr786\") pod \"nmstate-operator-694c9596b7-s67t6\" (UID: \"3ed4fbe3-f078-4fd2-91fd-ba4b154c34c5\") " pod="openshift-nmstate/nmstate-operator-694c9596b7-s67t6" Feb 18 00:50:03 crc kubenswrapper[4791]: I0218 00:50:03.473477 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cr786\" (UniqueName: \"kubernetes.io/projected/3ed4fbe3-f078-4fd2-91fd-ba4b154c34c5-kube-api-access-cr786\") pod \"nmstate-operator-694c9596b7-s67t6\" (UID: \"3ed4fbe3-f078-4fd2-91fd-ba4b154c34c5\") " pod="openshift-nmstate/nmstate-operator-694c9596b7-s67t6" Feb 18 00:50:03 crc kubenswrapper[4791]: I0218 00:50:03.649390 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-694c9596b7-s67t6" Feb 18 00:50:04 crc kubenswrapper[4791]: I0218 00:50:04.125822 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-694c9596b7-s67t6"] Feb 18 00:50:04 crc kubenswrapper[4791]: I0218 00:50:04.487685 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-694c9596b7-s67t6" event={"ID":"3ed4fbe3-f078-4fd2-91fd-ba4b154c34c5","Type":"ContainerStarted","Data":"35f5d5e9315cd2d9baaa380995c63109e905b46570994bbbabc25d1028cf570a"} Feb 18 00:50:07 crc kubenswrapper[4791]: I0218 00:50:07.507965 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-694c9596b7-s67t6" event={"ID":"3ed4fbe3-f078-4fd2-91fd-ba4b154c34c5","Type":"ContainerStarted","Data":"176332b5fc0c292bf41349a2d99697604cfabd089c2431c3c0ad172a04e15ea5"} Feb 18 00:50:07 crc kubenswrapper[4791]: I0218 00:50:07.521418 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-694c9596b7-s67t6" podStartSLOduration=2.048901285 podStartE2EDuration="4.521402156s" podCreationTimestamp="2026-02-18 00:50:03 +0000 UTC" firstStartedPulling="2026-02-18 00:50:04.138755279 +0000 UTC m=+945.706768449" lastFinishedPulling="2026-02-18 00:50:06.61125615 +0000 UTC m=+948.179269320" observedRunningTime="2026-02-18 00:50:07.521172169 +0000 UTC m=+949.089185349" watchObservedRunningTime="2026-02-18 00:50:07.521402156 +0000 UTC m=+949.089415326" Feb 18 00:50:12 crc kubenswrapper[4791]: I0218 00:50:12.793120 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-58c85c668d-9t458"] Feb 18 00:50:12 crc kubenswrapper[4791]: I0218 00:50:12.795143 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-58c85c668d-9t458" Feb 18 00:50:12 crc kubenswrapper[4791]: I0218 00:50:12.804011 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6jzh\" (UniqueName: \"kubernetes.io/projected/28664980-fdb0-453b-be75-c2d6c758c97e-kube-api-access-w6jzh\") pod \"nmstate-metrics-58c85c668d-9t458\" (UID: \"28664980-fdb0-453b-be75-c2d6c758c97e\") " pod="openshift-nmstate/nmstate-metrics-58c85c668d-9t458" Feb 18 00:50:12 crc kubenswrapper[4791]: I0218 00:50:12.809835 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-dglfc" Feb 18 00:50:12 crc kubenswrapper[4791]: I0218 00:50:12.816511 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-866bcb46dc-9zxzm"] Feb 18 00:50:12 crc kubenswrapper[4791]: I0218 00:50:12.817650 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-866bcb46dc-9zxzm" Feb 18 00:50:12 crc kubenswrapper[4791]: I0218 00:50:12.820776 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Feb 18 00:50:12 crc kubenswrapper[4791]: I0218 00:50:12.834557 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-58c85c668d-9t458"] Feb 18 00:50:12 crc kubenswrapper[4791]: I0218 00:50:12.850241 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-b79fm"] Feb 18 00:50:12 crc kubenswrapper[4791]: I0218 00:50:12.851304 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-b79fm" Feb 18 00:50:12 crc kubenswrapper[4791]: I0218 00:50:12.892852 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-866bcb46dc-9zxzm"] Feb 18 00:50:12 crc kubenswrapper[4791]: I0218 00:50:12.905449 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z748g\" (UniqueName: \"kubernetes.io/projected/d73fe398-974d-41d9-97e7-78287a263d67-kube-api-access-z748g\") pod \"nmstate-webhook-866bcb46dc-9zxzm\" (UID: \"d73fe398-974d-41d9-97e7-78287a263d67\") " pod="openshift-nmstate/nmstate-webhook-866bcb46dc-9zxzm" Feb 18 00:50:12 crc kubenswrapper[4791]: I0218 00:50:12.905507 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/e2e2c268-5347-4124-9bd5-276f07431108-dbus-socket\") pod \"nmstate-handler-b79fm\" (UID: \"e2e2c268-5347-4124-9bd5-276f07431108\") " pod="openshift-nmstate/nmstate-handler-b79fm" Feb 18 00:50:12 crc kubenswrapper[4791]: I0218 00:50:12.905534 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6jzh\" (UniqueName: \"kubernetes.io/projected/28664980-fdb0-453b-be75-c2d6c758c97e-kube-api-access-w6jzh\") pod \"nmstate-metrics-58c85c668d-9t458\" (UID: \"28664980-fdb0-453b-be75-c2d6c758c97e\") " pod="openshift-nmstate/nmstate-metrics-58c85c668d-9t458" Feb 18 00:50:12 crc kubenswrapper[4791]: I0218 00:50:12.905567 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h2jp4\" (UniqueName: \"kubernetes.io/projected/e2e2c268-5347-4124-9bd5-276f07431108-kube-api-access-h2jp4\") pod \"nmstate-handler-b79fm\" (UID: \"e2e2c268-5347-4124-9bd5-276f07431108\") " pod="openshift-nmstate/nmstate-handler-b79fm" Feb 18 00:50:12 crc kubenswrapper[4791]: I0218 00:50:12.905582 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/e2e2c268-5347-4124-9bd5-276f07431108-nmstate-lock\") pod \"nmstate-handler-b79fm\" (UID: \"e2e2c268-5347-4124-9bd5-276f07431108\") " pod="openshift-nmstate/nmstate-handler-b79fm" Feb 18 00:50:12 crc kubenswrapper[4791]: I0218 00:50:12.905637 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/d73fe398-974d-41d9-97e7-78287a263d67-tls-key-pair\") pod \"nmstate-webhook-866bcb46dc-9zxzm\" (UID: \"d73fe398-974d-41d9-97e7-78287a263d67\") " pod="openshift-nmstate/nmstate-webhook-866bcb46dc-9zxzm" Feb 18 00:50:12 crc kubenswrapper[4791]: I0218 00:50:12.905660 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/e2e2c268-5347-4124-9bd5-276f07431108-ovs-socket\") pod \"nmstate-handler-b79fm\" (UID: \"e2e2c268-5347-4124-9bd5-276f07431108\") " pod="openshift-nmstate/nmstate-handler-b79fm" Feb 18 00:50:12 crc kubenswrapper[4791]: I0218 00:50:12.954995 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6jzh\" (UniqueName: \"kubernetes.io/projected/28664980-fdb0-453b-be75-c2d6c758c97e-kube-api-access-w6jzh\") pod \"nmstate-metrics-58c85c668d-9t458\" (UID: \"28664980-fdb0-453b-be75-c2d6c758c97e\") " pod="openshift-nmstate/nmstate-metrics-58c85c668d-9t458" Feb 18 00:50:13 
crc kubenswrapper[4791]: I0218 00:50:13.006902 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/e2e2c268-5347-4124-9bd5-276f07431108-dbus-socket\") pod \"nmstate-handler-b79fm\" (UID: \"e2e2c268-5347-4124-9bd5-276f07431108\") " pod="openshift-nmstate/nmstate-handler-b79fm" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.006968 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h2jp4\" (UniqueName: \"kubernetes.io/projected/e2e2c268-5347-4124-9bd5-276f07431108-kube-api-access-h2jp4\") pod \"nmstate-handler-b79fm\" (UID: \"e2e2c268-5347-4124-9bd5-276f07431108\") " pod="openshift-nmstate/nmstate-handler-b79fm" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.006991 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/e2e2c268-5347-4124-9bd5-276f07431108-nmstate-lock\") pod \"nmstate-handler-b79fm\" (UID: \"e2e2c268-5347-4124-9bd5-276f07431108\") " pod="openshift-nmstate/nmstate-handler-b79fm" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.007046 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/d73fe398-974d-41d9-97e7-78287a263d67-tls-key-pair\") pod \"nmstate-webhook-866bcb46dc-9zxzm\" (UID: \"d73fe398-974d-41d9-97e7-78287a263d67\") " pod="openshift-nmstate/nmstate-webhook-866bcb46dc-9zxzm" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.007071 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/e2e2c268-5347-4124-9bd5-276f07431108-ovs-socket\") pod \"nmstate-handler-b79fm\" (UID: \"e2e2c268-5347-4124-9bd5-276f07431108\") " pod="openshift-nmstate/nmstate-handler-b79fm" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.007105 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z748g\" (UniqueName: \"kubernetes.io/projected/d73fe398-974d-41d9-97e7-78287a263d67-kube-api-access-z748g\") pod \"nmstate-webhook-866bcb46dc-9zxzm\" (UID: \"d73fe398-974d-41d9-97e7-78287a263d67\") " pod="openshift-nmstate/nmstate-webhook-866bcb46dc-9zxzm" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.007333 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/e2e2c268-5347-4124-9bd5-276f07431108-dbus-socket\") pod \"nmstate-handler-b79fm\" (UID: \"e2e2c268-5347-4124-9bd5-276f07431108\") " pod="openshift-nmstate/nmstate-handler-b79fm" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.007345 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/e2e2c268-5347-4124-9bd5-276f07431108-nmstate-lock\") pod \"nmstate-handler-b79fm\" (UID: \"e2e2c268-5347-4124-9bd5-276f07431108\") " pod="openshift-nmstate/nmstate-handler-b79fm" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.007443 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/e2e2c268-5347-4124-9bd5-276f07431108-ovs-socket\") pod \"nmstate-handler-b79fm\" (UID: \"e2e2c268-5347-4124-9bd5-276f07431108\") " pod="openshift-nmstate/nmstate-handler-b79fm" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.024066 4791 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/d73fe398-974d-41d9-97e7-78287a263d67-tls-key-pair\") pod \"nmstate-webhook-866bcb46dc-9zxzm\" (UID: \"d73fe398-974d-41d9-97e7-78287a263d67\") " pod="openshift-nmstate/nmstate-webhook-866bcb46dc-9zxzm" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.026665 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5c78fc5d65-56dx7"] Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.028143 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5c78fc5d65-56dx7" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.031425 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-4fjtl" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.031599 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.033143 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.039881 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h2jp4\" (UniqueName: \"kubernetes.io/projected/e2e2c268-5347-4124-9bd5-276f07431108-kube-api-access-h2jp4\") pod \"nmstate-handler-b79fm\" (UID: \"e2e2c268-5347-4124-9bd5-276f07431108\") " pod="openshift-nmstate/nmstate-handler-b79fm" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.049389 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5c78fc5d65-56dx7"] Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.052891 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z748g\" (UniqueName: \"kubernetes.io/projected/d73fe398-974d-41d9-97e7-78287a263d67-kube-api-access-z748g\") pod \"nmstate-webhook-866bcb46dc-9zxzm\" (UID: \"d73fe398-974d-41d9-97e7-78287a263d67\") " pod="openshift-nmstate/nmstate-webhook-866bcb46dc-9zxzm" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.116075 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ssmzx\" (UniqueName: \"kubernetes.io/projected/0f6d4b29-736a-4142-92e2-6ce0f0066a66-kube-api-access-ssmzx\") pod \"nmstate-console-plugin-5c78fc5d65-56dx7\" (UID: \"0f6d4b29-736a-4142-92e2-6ce0f0066a66\") " pod="openshift-nmstate/nmstate-console-plugin-5c78fc5d65-56dx7" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.116148 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/0f6d4b29-736a-4142-92e2-6ce0f0066a66-plugin-serving-cert\") pod \"nmstate-console-plugin-5c78fc5d65-56dx7\" (UID: \"0f6d4b29-736a-4142-92e2-6ce0f0066a66\") " pod="openshift-nmstate/nmstate-console-plugin-5c78fc5d65-56dx7" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.116180 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/0f6d4b29-736a-4142-92e2-6ce0f0066a66-nginx-conf\") pod \"nmstate-console-plugin-5c78fc5d65-56dx7\" (UID: \"0f6d4b29-736a-4142-92e2-6ce0f0066a66\") " pod="openshift-nmstate/nmstate-console-plugin-5c78fc5d65-56dx7" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.130652 4791 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-58c85c668d-9t458" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.141960 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-866bcb46dc-9zxzm" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.177703 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-b79fm" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.192076 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-776c7c9864-7v9mt"] Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.193951 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-776c7c9864-7v9mt" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.222699 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-console-oauth-config\") pod \"console-776c7c9864-7v9mt\" (UID: \"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0\") " pod="openshift-console/console-776c7c9864-7v9mt" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.222774 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/0f6d4b29-736a-4142-92e2-6ce0f0066a66-plugin-serving-cert\") pod \"nmstate-console-plugin-5c78fc5d65-56dx7\" (UID: \"0f6d4b29-736a-4142-92e2-6ce0f0066a66\") " pod="openshift-nmstate/nmstate-console-plugin-5c78fc5d65-56dx7" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.222799 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/0f6d4b29-736a-4142-92e2-6ce0f0066a66-nginx-conf\") pod \"nmstate-console-plugin-5c78fc5d65-56dx7\" (UID: \"0f6d4b29-736a-4142-92e2-6ce0f0066a66\") " pod="openshift-nmstate/nmstate-console-plugin-5c78fc5d65-56dx7" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.222820 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-oauth-serving-cert\") pod \"console-776c7c9864-7v9mt\" (UID: \"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0\") " pod="openshift-console/console-776c7c9864-7v9mt" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.222843 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-console-config\") pod \"console-776c7c9864-7v9mt\" (UID: \"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0\") " pod="openshift-console/console-776c7c9864-7v9mt" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.222877 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-service-ca\") pod \"console-776c7c9864-7v9mt\" (UID: \"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0\") " pod="openshift-console/console-776c7c9864-7v9mt" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.222904 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-console-serving-cert\") pod \"console-776c7c9864-7v9mt\" (UID: \"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0\") " pod="openshift-console/console-776c7c9864-7v9mt" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.222937 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cd82k\" (UniqueName: \"kubernetes.io/projected/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-kube-api-access-cd82k\") pod \"console-776c7c9864-7v9mt\" (UID: \"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0\") " pod="openshift-console/console-776c7c9864-7v9mt" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.222961 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-trusted-ca-bundle\") pod \"console-776c7c9864-7v9mt\" (UID: \"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0\") " pod="openshift-console/console-776c7c9864-7v9mt" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.222985 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ssmzx\" (UniqueName: \"kubernetes.io/projected/0f6d4b29-736a-4142-92e2-6ce0f0066a66-kube-api-access-ssmzx\") pod \"nmstate-console-plugin-5c78fc5d65-56dx7\" (UID: \"0f6d4b29-736a-4142-92e2-6ce0f0066a66\") " pod="openshift-nmstate/nmstate-console-plugin-5c78fc5d65-56dx7" Feb 18 00:50:13 crc kubenswrapper[4791]: E0218 00:50:13.223365 4791 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Feb 18 00:50:13 crc kubenswrapper[4791]: E0218 00:50:13.223404 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f6d4b29-736a-4142-92e2-6ce0f0066a66-plugin-serving-cert podName:0f6d4b29-736a-4142-92e2-6ce0f0066a66 nodeName:}" failed. No retries permitted until 2026-02-18 00:50:13.723390966 +0000 UTC m=+955.291404136 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/0f6d4b29-736a-4142-92e2-6ce0f0066a66-plugin-serving-cert") pod "nmstate-console-plugin-5c78fc5d65-56dx7" (UID: "0f6d4b29-736a-4142-92e2-6ce0f0066a66") : secret "plugin-serving-cert" not found Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.223544 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/0f6d4b29-736a-4142-92e2-6ce0f0066a66-nginx-conf\") pod \"nmstate-console-plugin-5c78fc5d65-56dx7\" (UID: \"0f6d4b29-736a-4142-92e2-6ce0f0066a66\") " pod="openshift-nmstate/nmstate-console-plugin-5c78fc5d65-56dx7" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.223616 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-776c7c9864-7v9mt"] Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.243251 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ssmzx\" (UniqueName: \"kubernetes.io/projected/0f6d4b29-736a-4142-92e2-6ce0f0066a66-kube-api-access-ssmzx\") pod \"nmstate-console-plugin-5c78fc5d65-56dx7\" (UID: \"0f6d4b29-736a-4142-92e2-6ce0f0066a66\") " pod="openshift-nmstate/nmstate-console-plugin-5c78fc5d65-56dx7" Feb 18 00:50:13 crc kubenswrapper[4791]: W0218 00:50:13.250139 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode2e2c268_5347_4124_9bd5_276f07431108.slice/crio-a8b862b7358f15c5101064541f6c6fd5b139beff5e47bab1254ef5c7a84da040 WatchSource:0}: Error finding container a8b862b7358f15c5101064541f6c6fd5b139beff5e47bab1254ef5c7a84da040: Status 404 returned error can't find the container with id a8b862b7358f15c5101064541f6c6fd5b139beff5e47bab1254ef5c7a84da040 Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.324649 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-oauth-serving-cert\") pod \"console-776c7c9864-7v9mt\" (UID: \"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0\") " pod="openshift-console/console-776c7c9864-7v9mt" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.324686 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-console-config\") pod \"console-776c7c9864-7v9mt\" (UID: \"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0\") " pod="openshift-console/console-776c7c9864-7v9mt" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.324724 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-service-ca\") pod \"console-776c7c9864-7v9mt\" (UID: \"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0\") " pod="openshift-console/console-776c7c9864-7v9mt" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.324754 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-console-serving-cert\") pod \"console-776c7c9864-7v9mt\" (UID: \"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0\") " pod="openshift-console/console-776c7c9864-7v9mt" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.324788 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-cd82k\" (UniqueName: \"kubernetes.io/projected/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-kube-api-access-cd82k\") pod \"console-776c7c9864-7v9mt\" (UID: \"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0\") " pod="openshift-console/console-776c7c9864-7v9mt" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.325914 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-console-config\") pod \"console-776c7c9864-7v9mt\" (UID: \"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0\") " pod="openshift-console/console-776c7c9864-7v9mt" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.326264 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-service-ca\") pod \"console-776c7c9864-7v9mt\" (UID: \"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0\") " pod="openshift-console/console-776c7c9864-7v9mt" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.324813 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-trusted-ca-bundle\") pod \"console-776c7c9864-7v9mt\" (UID: \"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0\") " pod="openshift-console/console-776c7c9864-7v9mt" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.326403 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-console-oauth-config\") pod \"console-776c7c9864-7v9mt\" (UID: \"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0\") " pod="openshift-console/console-776c7c9864-7v9mt" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.326577 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-oauth-serving-cert\") pod \"console-776c7c9864-7v9mt\" (UID: \"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0\") " pod="openshift-console/console-776c7c9864-7v9mt" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.326923 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-trusted-ca-bundle\") pod \"console-776c7c9864-7v9mt\" (UID: \"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0\") " pod="openshift-console/console-776c7c9864-7v9mt" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.331432 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-console-serving-cert\") pod \"console-776c7c9864-7v9mt\" (UID: \"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0\") " pod="openshift-console/console-776c7c9864-7v9mt" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.339852 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-console-oauth-config\") pod \"console-776c7c9864-7v9mt\" (UID: \"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0\") " pod="openshift-console/console-776c7c9864-7v9mt" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.343759 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cd82k\" (UniqueName: 
\"kubernetes.io/projected/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-kube-api-access-cd82k\") pod \"console-776c7c9864-7v9mt\" (UID: \"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0\") " pod="openshift-console/console-776c7c9864-7v9mt" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.521343 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-776c7c9864-7v9mt" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.554140 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-b79fm" event={"ID":"e2e2c268-5347-4124-9bd5-276f07431108","Type":"ContainerStarted","Data":"a8b862b7358f15c5101064541f6c6fd5b139beff5e47bab1254ef5c7a84da040"} Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.646296 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-58c85c668d-9t458"] Feb 18 00:50:13 crc kubenswrapper[4791]: W0218 00:50:13.652807 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod28664980_fdb0_453b_be75_c2d6c758c97e.slice/crio-f2eb8c3cc3fa1b12a18c38ae685ffb42358310455f911955f39365b37c51250a WatchSource:0}: Error finding container f2eb8c3cc3fa1b12a18c38ae685ffb42358310455f911955f39365b37c51250a: Status 404 returned error can't find the container with id f2eb8c3cc3fa1b12a18c38ae685ffb42358310455f911955f39365b37c51250a Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.704189 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-866bcb46dc-9zxzm"] Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.734717 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/0f6d4b29-736a-4142-92e2-6ce0f0066a66-plugin-serving-cert\") pod \"nmstate-console-plugin-5c78fc5d65-56dx7\" (UID: \"0f6d4b29-736a-4142-92e2-6ce0f0066a66\") " pod="openshift-nmstate/nmstate-console-plugin-5c78fc5d65-56dx7" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.738423 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/0f6d4b29-736a-4142-92e2-6ce0f0066a66-plugin-serving-cert\") pod \"nmstate-console-plugin-5c78fc5d65-56dx7\" (UID: \"0f6d4b29-736a-4142-92e2-6ce0f0066a66\") " pod="openshift-nmstate/nmstate-console-plugin-5c78fc5d65-56dx7" Feb 18 00:50:13 crc kubenswrapper[4791]: I0218 00:50:13.940507 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-776c7c9864-7v9mt"] Feb 18 00:50:14 crc kubenswrapper[4791]: I0218 00:50:14.005982 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5c78fc5d65-56dx7" Feb 18 00:50:14 crc kubenswrapper[4791]: W0218 00:50:14.424441 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0f6d4b29_736a_4142_92e2_6ce0f0066a66.slice/crio-9b820177b32b7ca244cd118ae158ead0780839f7c6baee64bb4a2a0298e8b409 WatchSource:0}: Error finding container 9b820177b32b7ca244cd118ae158ead0780839f7c6baee64bb4a2a0298e8b409: Status 404 returned error can't find the container with id 9b820177b32b7ca244cd118ae158ead0780839f7c6baee64bb4a2a0298e8b409 Feb 18 00:50:14 crc kubenswrapper[4791]: I0218 00:50:14.433415 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5c78fc5d65-56dx7"] Feb 18 00:50:14 crc kubenswrapper[4791]: I0218 00:50:14.562059 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5c78fc5d65-56dx7" event={"ID":"0f6d4b29-736a-4142-92e2-6ce0f0066a66","Type":"ContainerStarted","Data":"9b820177b32b7ca244cd118ae158ead0780839f7c6baee64bb4a2a0298e8b409"} Feb 18 00:50:14 crc kubenswrapper[4791]: I0218 00:50:14.563190 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-58c85c668d-9t458" event={"ID":"28664980-fdb0-453b-be75-c2d6c758c97e","Type":"ContainerStarted","Data":"f2eb8c3cc3fa1b12a18c38ae685ffb42358310455f911955f39365b37c51250a"} Feb 18 00:50:14 crc kubenswrapper[4791]: I0218 00:50:14.564186 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-866bcb46dc-9zxzm" event={"ID":"d73fe398-974d-41d9-97e7-78287a263d67","Type":"ContainerStarted","Data":"baddf63e3cfde3bddc5211d4ce1f3534f50082aa960ea9706ab73b4d44af9eee"} Feb 18 00:50:14 crc kubenswrapper[4791]: I0218 00:50:14.565861 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-776c7c9864-7v9mt" event={"ID":"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0","Type":"ContainerStarted","Data":"f198e41ac37ae308322af87d21db9d60e1bd1aa3723a98ee586d6f66d01b3b26"} Feb 18 00:50:14 crc kubenswrapper[4791]: I0218 00:50:14.565882 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-776c7c9864-7v9mt" event={"ID":"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0","Type":"ContainerStarted","Data":"212d19f207b05628a96c239844ddb7a3e0807a834bd2b21fb84adcd528f70724"} Feb 18 00:50:14 crc kubenswrapper[4791]: I0218 00:50:14.586689 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-776c7c9864-7v9mt" podStartSLOduration=1.5866713 podStartE2EDuration="1.5866713s" podCreationTimestamp="2026-02-18 00:50:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:50:14.582726749 +0000 UTC m=+956.150739919" watchObservedRunningTime="2026-02-18 00:50:14.5866713 +0000 UTC m=+956.154684480" Feb 18 00:50:16 crc kubenswrapper[4791]: I0218 00:50:16.584276 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-b79fm" event={"ID":"e2e2c268-5347-4124-9bd5-276f07431108","Type":"ContainerStarted","Data":"9e0dfb1e150902fcfdd2fe56127a984973693bd68c31fce8cae6d962ff512634"} Feb 18 00:50:16 crc kubenswrapper[4791]: I0218 00:50:16.584905 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-b79fm" Feb 18 00:50:16 crc kubenswrapper[4791]: I0218 
00:50:16.587674 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-866bcb46dc-9zxzm" event={"ID":"d73fe398-974d-41d9-97e7-78287a263d67","Type":"ContainerStarted","Data":"ac17be75ee83297239e59ca331491b78a7b9ea36e90c4a57ee481b81897ea6d0"} Feb 18 00:50:16 crc kubenswrapper[4791]: I0218 00:50:16.587832 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-866bcb46dc-9zxzm" Feb 18 00:50:16 crc kubenswrapper[4791]: I0218 00:50:16.589687 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-58c85c668d-9t458" event={"ID":"28664980-fdb0-453b-be75-c2d6c758c97e","Type":"ContainerStarted","Data":"c61eaa3cf10b759681ad3320dc06244dd34d560a833a97ef087ff3b198d90ef1"} Feb 18 00:50:16 crc kubenswrapper[4791]: I0218 00:50:16.602730 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-b79fm" podStartSLOduration=1.9119559609999999 podStartE2EDuration="4.60271291s" podCreationTimestamp="2026-02-18 00:50:12 +0000 UTC" firstStartedPulling="2026-02-18 00:50:13.257615558 +0000 UTC m=+954.825628728" lastFinishedPulling="2026-02-18 00:50:15.948372507 +0000 UTC m=+957.516385677" observedRunningTime="2026-02-18 00:50:16.601792252 +0000 UTC m=+958.169805432" watchObservedRunningTime="2026-02-18 00:50:16.60271291 +0000 UTC m=+958.170726080" Feb 18 00:50:16 crc kubenswrapper[4791]: I0218 00:50:16.632886 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-866bcb46dc-9zxzm" podStartSLOduration=2.337015716 podStartE2EDuration="4.632866947s" podCreationTimestamp="2026-02-18 00:50:12 +0000 UTC" firstStartedPulling="2026-02-18 00:50:13.720267978 +0000 UTC m=+955.288281148" lastFinishedPulling="2026-02-18 00:50:16.016119209 +0000 UTC m=+957.584132379" observedRunningTime="2026-02-18 00:50:16.614670528 +0000 UTC m=+958.182683688" watchObservedRunningTime="2026-02-18 00:50:16.632866947 +0000 UTC m=+958.200880117" Feb 18 00:50:17 crc kubenswrapper[4791]: I0218 00:50:17.597984 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5c78fc5d65-56dx7" event={"ID":"0f6d4b29-736a-4142-92e2-6ce0f0066a66","Type":"ContainerStarted","Data":"8542666409ccf907cc73c39d4d289aea1f755e892a44eed0bacc3008cd0d0041"} Feb 18 00:50:17 crc kubenswrapper[4791]: I0218 00:50:17.628597 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-5c78fc5d65-56dx7" podStartSLOduration=1.8464183950000002 podStartE2EDuration="4.628574103s" podCreationTimestamp="2026-02-18 00:50:13 +0000 UTC" firstStartedPulling="2026-02-18 00:50:14.426685843 +0000 UTC m=+955.994699013" lastFinishedPulling="2026-02-18 00:50:17.208841551 +0000 UTC m=+958.776854721" observedRunningTime="2026-02-18 00:50:17.620215466 +0000 UTC m=+959.188228636" watchObservedRunningTime="2026-02-18 00:50:17.628574103 +0000 UTC m=+959.196587273" Feb 18 00:50:19 crc kubenswrapper[4791]: I0218 00:50:19.619489 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-58c85c668d-9t458" event={"ID":"28664980-fdb0-453b-be75-c2d6c758c97e","Type":"ContainerStarted","Data":"137efbf2b0f109dafddba442c49c76daa4e699d1ea9a5c89e96420c12eb7c99c"} Feb 18 00:50:19 crc kubenswrapper[4791]: I0218 00:50:19.636484 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-nmstate/nmstate-metrics-58c85c668d-9t458" podStartSLOduration=2.245094881 podStartE2EDuration="7.636465823s" podCreationTimestamp="2026-02-18 00:50:12 +0000 UTC" firstStartedPulling="2026-02-18 00:50:13.655053594 +0000 UTC m=+955.223066764" lastFinishedPulling="2026-02-18 00:50:19.046424536 +0000 UTC m=+960.614437706" observedRunningTime="2026-02-18 00:50:19.632720438 +0000 UTC m=+961.200733608" watchObservedRunningTime="2026-02-18 00:50:19.636465823 +0000 UTC m=+961.204479013" Feb 18 00:50:23 crc kubenswrapper[4791]: I0218 00:50:23.203634 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-b79fm" Feb 18 00:50:23 crc kubenswrapper[4791]: I0218 00:50:23.522471 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-776c7c9864-7v9mt" Feb 18 00:50:23 crc kubenswrapper[4791]: I0218 00:50:23.522514 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-776c7c9864-7v9mt" Feb 18 00:50:23 crc kubenswrapper[4791]: I0218 00:50:23.535952 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-776c7c9864-7v9mt" Feb 18 00:50:23 crc kubenswrapper[4791]: I0218 00:50:23.649590 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-776c7c9864-7v9mt" Feb 18 00:50:23 crc kubenswrapper[4791]: I0218 00:50:23.698090 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f49d7b5fb-z9n6g"] Feb 18 00:50:27 crc kubenswrapper[4791]: I0218 00:50:27.695210 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-bwg2b"] Feb 18 00:50:27 crc kubenswrapper[4791]: I0218 00:50:27.698812 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bwg2b" Feb 18 00:50:27 crc kubenswrapper[4791]: I0218 00:50:27.706803 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bwg2b"] Feb 18 00:50:27 crc kubenswrapper[4791]: I0218 00:50:27.760882 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wcwgm\" (UniqueName: \"kubernetes.io/projected/41c50c0f-6cef-4bc2-bec0-1815ab76cff6-kube-api-access-wcwgm\") pod \"community-operators-bwg2b\" (UID: \"41c50c0f-6cef-4bc2-bec0-1815ab76cff6\") " pod="openshift-marketplace/community-operators-bwg2b" Feb 18 00:50:27 crc kubenswrapper[4791]: I0218 00:50:27.761136 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41c50c0f-6cef-4bc2-bec0-1815ab76cff6-utilities\") pod \"community-operators-bwg2b\" (UID: \"41c50c0f-6cef-4bc2-bec0-1815ab76cff6\") " pod="openshift-marketplace/community-operators-bwg2b" Feb 18 00:50:27 crc kubenswrapper[4791]: I0218 00:50:27.761267 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41c50c0f-6cef-4bc2-bec0-1815ab76cff6-catalog-content\") pod \"community-operators-bwg2b\" (UID: \"41c50c0f-6cef-4bc2-bec0-1815ab76cff6\") " pod="openshift-marketplace/community-operators-bwg2b" Feb 18 00:50:27 crc kubenswrapper[4791]: I0218 00:50:27.861916 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41c50c0f-6cef-4bc2-bec0-1815ab76cff6-catalog-content\") pod \"community-operators-bwg2b\" (UID: \"41c50c0f-6cef-4bc2-bec0-1815ab76cff6\") " pod="openshift-marketplace/community-operators-bwg2b" Feb 18 00:50:27 crc kubenswrapper[4791]: I0218 00:50:27.862042 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wcwgm\" (UniqueName: \"kubernetes.io/projected/41c50c0f-6cef-4bc2-bec0-1815ab76cff6-kube-api-access-wcwgm\") pod \"community-operators-bwg2b\" (UID: \"41c50c0f-6cef-4bc2-bec0-1815ab76cff6\") " pod="openshift-marketplace/community-operators-bwg2b" Feb 18 00:50:27 crc kubenswrapper[4791]: I0218 00:50:27.862134 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41c50c0f-6cef-4bc2-bec0-1815ab76cff6-utilities\") pod \"community-operators-bwg2b\" (UID: \"41c50c0f-6cef-4bc2-bec0-1815ab76cff6\") " pod="openshift-marketplace/community-operators-bwg2b" Feb 18 00:50:27 crc kubenswrapper[4791]: I0218 00:50:27.862673 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41c50c0f-6cef-4bc2-bec0-1815ab76cff6-utilities\") pod \"community-operators-bwg2b\" (UID: \"41c50c0f-6cef-4bc2-bec0-1815ab76cff6\") " pod="openshift-marketplace/community-operators-bwg2b" Feb 18 00:50:27 crc kubenswrapper[4791]: I0218 00:50:27.862976 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41c50c0f-6cef-4bc2-bec0-1815ab76cff6-catalog-content\") pod \"community-operators-bwg2b\" (UID: \"41c50c0f-6cef-4bc2-bec0-1815ab76cff6\") " pod="openshift-marketplace/community-operators-bwg2b" Feb 18 00:50:27 crc kubenswrapper[4791]: I0218 00:50:27.884359 4791 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-wcwgm\" (UniqueName: \"kubernetes.io/projected/41c50c0f-6cef-4bc2-bec0-1815ab76cff6-kube-api-access-wcwgm\") pod \"community-operators-bwg2b\" (UID: \"41c50c0f-6cef-4bc2-bec0-1815ab76cff6\") " pod="openshift-marketplace/community-operators-bwg2b" Feb 18 00:50:28 crc kubenswrapper[4791]: I0218 00:50:28.018997 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bwg2b" Feb 18 00:50:28 crc kubenswrapper[4791]: I0218 00:50:28.564698 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bwg2b"] Feb 18 00:50:28 crc kubenswrapper[4791]: I0218 00:50:28.681730 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bwg2b" event={"ID":"41c50c0f-6cef-4bc2-bec0-1815ab76cff6","Type":"ContainerStarted","Data":"0fcf0a953c94766c124a2151909b0173cf68b770e7e65bf640c592e3ff250527"} Feb 18 00:50:29 crc kubenswrapper[4791]: I0218 00:50:29.690128 4791 generic.go:334] "Generic (PLEG): container finished" podID="41c50c0f-6cef-4bc2-bec0-1815ab76cff6" containerID="e3769e45ad282cb4728f40b8caa857fc030bdcee451188a2b350b5c0c777c300" exitCode=0 Feb 18 00:50:29 crc kubenswrapper[4791]: I0218 00:50:29.690182 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bwg2b" event={"ID":"41c50c0f-6cef-4bc2-bec0-1815ab76cff6","Type":"ContainerDied","Data":"e3769e45ad282cb4728f40b8caa857fc030bdcee451188a2b350b5c0c777c300"} Feb 18 00:50:31 crc kubenswrapper[4791]: I0218 00:50:31.852198 4791 generic.go:334] "Generic (PLEG): container finished" podID="41c50c0f-6cef-4bc2-bec0-1815ab76cff6" containerID="9ed5e5536aeb04cfabf74337be6b1a0873bdfd7452da09088b4c47f3db660dc4" exitCode=0 Feb 18 00:50:31 crc kubenswrapper[4791]: I0218 00:50:31.852736 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bwg2b" event={"ID":"41c50c0f-6cef-4bc2-bec0-1815ab76cff6","Type":"ContainerDied","Data":"9ed5e5536aeb04cfabf74337be6b1a0873bdfd7452da09088b4c47f3db660dc4"} Feb 18 00:50:32 crc kubenswrapper[4791]: I0218 00:50:32.861333 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bwg2b" event={"ID":"41c50c0f-6cef-4bc2-bec0-1815ab76cff6","Type":"ContainerStarted","Data":"3bdb485a08371462b2017f98d921225608563ca7befebbe083a3fbdd68137dee"} Feb 18 00:50:32 crc kubenswrapper[4791]: I0218 00:50:32.888898 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-bwg2b" podStartSLOduration=3.38225451 podStartE2EDuration="5.888875659s" podCreationTimestamp="2026-02-18 00:50:27 +0000 UTC" firstStartedPulling="2026-02-18 00:50:29.692755137 +0000 UTC m=+971.260768317" lastFinishedPulling="2026-02-18 00:50:32.199376296 +0000 UTC m=+973.767389466" observedRunningTime="2026-02-18 00:50:32.884097113 +0000 UTC m=+974.452110323" watchObservedRunningTime="2026-02-18 00:50:32.888875659 +0000 UTC m=+974.456888869" Feb 18 00:50:33 crc kubenswrapper[4791]: I0218 00:50:33.149765 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-866bcb46dc-9zxzm" Feb 18 00:50:35 crc kubenswrapper[4791]: I0218 00:50:35.088409 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-cgbkj"] Feb 18 00:50:35 crc kubenswrapper[4791]: I0218 00:50:35.090293 4791 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cgbkj" Feb 18 00:50:35 crc kubenswrapper[4791]: I0218 00:50:35.105744 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cgbkj"] Feb 18 00:50:35 crc kubenswrapper[4791]: I0218 00:50:35.288214 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/11bc655d-7634-4eb5-b30d-b3a5b45088a8-utilities\") pod \"certified-operators-cgbkj\" (UID: \"11bc655d-7634-4eb5-b30d-b3a5b45088a8\") " pod="openshift-marketplace/certified-operators-cgbkj" Feb 18 00:50:35 crc kubenswrapper[4791]: I0218 00:50:35.288331 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/11bc655d-7634-4eb5-b30d-b3a5b45088a8-catalog-content\") pod \"certified-operators-cgbkj\" (UID: \"11bc655d-7634-4eb5-b30d-b3a5b45088a8\") " pod="openshift-marketplace/certified-operators-cgbkj" Feb 18 00:50:35 crc kubenswrapper[4791]: I0218 00:50:35.288422 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cl24m\" (UniqueName: \"kubernetes.io/projected/11bc655d-7634-4eb5-b30d-b3a5b45088a8-kube-api-access-cl24m\") pod \"certified-operators-cgbkj\" (UID: \"11bc655d-7634-4eb5-b30d-b3a5b45088a8\") " pod="openshift-marketplace/certified-operators-cgbkj" Feb 18 00:50:35 crc kubenswrapper[4791]: I0218 00:50:35.389611 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cl24m\" (UniqueName: \"kubernetes.io/projected/11bc655d-7634-4eb5-b30d-b3a5b45088a8-kube-api-access-cl24m\") pod \"certified-operators-cgbkj\" (UID: \"11bc655d-7634-4eb5-b30d-b3a5b45088a8\") " pod="openshift-marketplace/certified-operators-cgbkj" Feb 18 00:50:35 crc kubenswrapper[4791]: I0218 00:50:35.389687 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/11bc655d-7634-4eb5-b30d-b3a5b45088a8-utilities\") pod \"certified-operators-cgbkj\" (UID: \"11bc655d-7634-4eb5-b30d-b3a5b45088a8\") " pod="openshift-marketplace/certified-operators-cgbkj" Feb 18 00:50:35 crc kubenswrapper[4791]: I0218 00:50:35.389752 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/11bc655d-7634-4eb5-b30d-b3a5b45088a8-catalog-content\") pod \"certified-operators-cgbkj\" (UID: \"11bc655d-7634-4eb5-b30d-b3a5b45088a8\") " pod="openshift-marketplace/certified-operators-cgbkj" Feb 18 00:50:35 crc kubenswrapper[4791]: I0218 00:50:35.390144 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/11bc655d-7634-4eb5-b30d-b3a5b45088a8-utilities\") pod \"certified-operators-cgbkj\" (UID: \"11bc655d-7634-4eb5-b30d-b3a5b45088a8\") " pod="openshift-marketplace/certified-operators-cgbkj" Feb 18 00:50:35 crc kubenswrapper[4791]: I0218 00:50:35.390237 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/11bc655d-7634-4eb5-b30d-b3a5b45088a8-catalog-content\") pod \"certified-operators-cgbkj\" (UID: \"11bc655d-7634-4eb5-b30d-b3a5b45088a8\") " pod="openshift-marketplace/certified-operators-cgbkj" Feb 18 00:50:35 crc kubenswrapper[4791]: I0218 00:50:35.408251 4791 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cl24m\" (UniqueName: \"kubernetes.io/projected/11bc655d-7634-4eb5-b30d-b3a5b45088a8-kube-api-access-cl24m\") pod \"certified-operators-cgbkj\" (UID: \"11bc655d-7634-4eb5-b30d-b3a5b45088a8\") " pod="openshift-marketplace/certified-operators-cgbkj" Feb 18 00:50:35 crc kubenswrapper[4791]: I0218 00:50:35.420529 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cgbkj" Feb 18 00:50:35 crc kubenswrapper[4791]: I0218 00:50:35.861116 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cgbkj"] Feb 18 00:50:35 crc kubenswrapper[4791]: I0218 00:50:35.881587 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cgbkj" event={"ID":"11bc655d-7634-4eb5-b30d-b3a5b45088a8","Type":"ContainerStarted","Data":"16d672e2fffdf66292085134d2453397b40a59dd96277ce4f88de0416a19a65b"} Feb 18 00:50:36 crc kubenswrapper[4791]: I0218 00:50:36.889596 4791 generic.go:334] "Generic (PLEG): container finished" podID="11bc655d-7634-4eb5-b30d-b3a5b45088a8" containerID="aeeea8e58ee1eebecabfc73135b1433844ac911ecd4b4ea0ef6be8c43773d7d1" exitCode=0 Feb 18 00:50:36 crc kubenswrapper[4791]: I0218 00:50:36.889867 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cgbkj" event={"ID":"11bc655d-7634-4eb5-b30d-b3a5b45088a8","Type":"ContainerDied","Data":"aeeea8e58ee1eebecabfc73135b1433844ac911ecd4b4ea0ef6be8c43773d7d1"} Feb 18 00:50:38 crc kubenswrapper[4791]: I0218 00:50:38.019426 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-bwg2b" Feb 18 00:50:38 crc kubenswrapper[4791]: I0218 00:50:38.019699 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-bwg2b" Feb 18 00:50:38 crc kubenswrapper[4791]: I0218 00:50:38.084886 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-bwg2b" Feb 18 00:50:38 crc kubenswrapper[4791]: I0218 00:50:38.906017 4791 generic.go:334] "Generic (PLEG): container finished" podID="11bc655d-7634-4eb5-b30d-b3a5b45088a8" containerID="657a2047bbfd536c3abb0d5470bcc81b4be955c69fb276302594789ad69fbd41" exitCode=0 Feb 18 00:50:38 crc kubenswrapper[4791]: I0218 00:50:38.906217 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cgbkj" event={"ID":"11bc655d-7634-4eb5-b30d-b3a5b45088a8","Type":"ContainerDied","Data":"657a2047bbfd536c3abb0d5470bcc81b4be955c69fb276302594789ad69fbd41"} Feb 18 00:50:38 crc kubenswrapper[4791]: I0218 00:50:38.909307 4791 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 18 00:50:38 crc kubenswrapper[4791]: I0218 00:50:38.953913 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-bwg2b" Feb 18 00:50:39 crc kubenswrapper[4791]: I0218 00:50:39.915246 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cgbkj" event={"ID":"11bc655d-7634-4eb5-b30d-b3a5b45088a8","Type":"ContainerStarted","Data":"26dcfd57c538f0c9b3d5b808a47edf73e5d9b1d6c2549a8f9d31eaa585f364d5"} Feb 18 00:50:39 crc kubenswrapper[4791]: I0218 00:50:39.936029 4791 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openshift-marketplace/certified-operators-cgbkj" podStartSLOduration=2.478812327 podStartE2EDuration="4.936013317s" podCreationTimestamp="2026-02-18 00:50:35 +0000 UTC" firstStartedPulling="2026-02-18 00:50:36.891308858 +0000 UTC m=+978.459322028" lastFinishedPulling="2026-02-18 00:50:39.348509848 +0000 UTC m=+980.916523018" observedRunningTime="2026-02-18 00:50:39.933150559 +0000 UTC m=+981.501163749" watchObservedRunningTime="2026-02-18 00:50:39.936013317 +0000 UTC m=+981.504026487" Feb 18 00:50:41 crc kubenswrapper[4791]: I0218 00:50:41.283607 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bwg2b"] Feb 18 00:50:41 crc kubenswrapper[4791]: I0218 00:50:41.284249 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-bwg2b" podUID="41c50c0f-6cef-4bc2-bec0-1815ab76cff6" containerName="registry-server" containerID="cri-o://3bdb485a08371462b2017f98d921225608563ca7befebbe083a3fbdd68137dee" gracePeriod=2 Feb 18 00:50:41 crc kubenswrapper[4791]: I0218 00:50:41.725302 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bwg2b" Feb 18 00:50:41 crc kubenswrapper[4791]: I0218 00:50:41.925835 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41c50c0f-6cef-4bc2-bec0-1815ab76cff6-utilities\") pod \"41c50c0f-6cef-4bc2-bec0-1815ab76cff6\" (UID: \"41c50c0f-6cef-4bc2-bec0-1815ab76cff6\") " Feb 18 00:50:41 crc kubenswrapper[4791]: I0218 00:50:41.925986 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41c50c0f-6cef-4bc2-bec0-1815ab76cff6-catalog-content\") pod \"41c50c0f-6cef-4bc2-bec0-1815ab76cff6\" (UID: \"41c50c0f-6cef-4bc2-bec0-1815ab76cff6\") " Feb 18 00:50:41 crc kubenswrapper[4791]: I0218 00:50:41.926051 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wcwgm\" (UniqueName: \"kubernetes.io/projected/41c50c0f-6cef-4bc2-bec0-1815ab76cff6-kube-api-access-wcwgm\") pod \"41c50c0f-6cef-4bc2-bec0-1815ab76cff6\" (UID: \"41c50c0f-6cef-4bc2-bec0-1815ab76cff6\") " Feb 18 00:50:41 crc kubenswrapper[4791]: I0218 00:50:41.926604 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/41c50c0f-6cef-4bc2-bec0-1815ab76cff6-utilities" (OuterVolumeSpecName: "utilities") pod "41c50c0f-6cef-4bc2-bec0-1815ab76cff6" (UID: "41c50c0f-6cef-4bc2-bec0-1815ab76cff6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:50:41 crc kubenswrapper[4791]: I0218 00:50:41.936348 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41c50c0f-6cef-4bc2-bec0-1815ab76cff6-kube-api-access-wcwgm" (OuterVolumeSpecName: "kube-api-access-wcwgm") pod "41c50c0f-6cef-4bc2-bec0-1815ab76cff6" (UID: "41c50c0f-6cef-4bc2-bec0-1815ab76cff6"). InnerVolumeSpecName "kube-api-access-wcwgm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:50:41 crc kubenswrapper[4791]: I0218 00:50:41.936897 4791 generic.go:334] "Generic (PLEG): container finished" podID="41c50c0f-6cef-4bc2-bec0-1815ab76cff6" containerID="3bdb485a08371462b2017f98d921225608563ca7befebbe083a3fbdd68137dee" exitCode=0 Feb 18 00:50:41 crc kubenswrapper[4791]: I0218 00:50:41.936945 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bwg2b" Feb 18 00:50:41 crc kubenswrapper[4791]: I0218 00:50:41.936952 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bwg2b" event={"ID":"41c50c0f-6cef-4bc2-bec0-1815ab76cff6","Type":"ContainerDied","Data":"3bdb485a08371462b2017f98d921225608563ca7befebbe083a3fbdd68137dee"} Feb 18 00:50:41 crc kubenswrapper[4791]: I0218 00:50:41.936989 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bwg2b" event={"ID":"41c50c0f-6cef-4bc2-bec0-1815ab76cff6","Type":"ContainerDied","Data":"0fcf0a953c94766c124a2151909b0173cf68b770e7e65bf640c592e3ff250527"} Feb 18 00:50:41 crc kubenswrapper[4791]: I0218 00:50:41.937016 4791 scope.go:117] "RemoveContainer" containerID="3bdb485a08371462b2017f98d921225608563ca7befebbe083a3fbdd68137dee" Feb 18 00:50:41 crc kubenswrapper[4791]: I0218 00:50:41.974606 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/41c50c0f-6cef-4bc2-bec0-1815ab76cff6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "41c50c0f-6cef-4bc2-bec0-1815ab76cff6" (UID: "41c50c0f-6cef-4bc2-bec0-1815ab76cff6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:50:41 crc kubenswrapper[4791]: I0218 00:50:41.983974 4791 scope.go:117] "RemoveContainer" containerID="9ed5e5536aeb04cfabf74337be6b1a0873bdfd7452da09088b4c47f3db660dc4" Feb 18 00:50:42 crc kubenswrapper[4791]: I0218 00:50:42.007268 4791 scope.go:117] "RemoveContainer" containerID="e3769e45ad282cb4728f40b8caa857fc030bdcee451188a2b350b5c0c777c300" Feb 18 00:50:42 crc kubenswrapper[4791]: I0218 00:50:42.034119 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41c50c0f-6cef-4bc2-bec0-1815ab76cff6-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 00:50:42 crc kubenswrapper[4791]: I0218 00:50:42.034153 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wcwgm\" (UniqueName: \"kubernetes.io/projected/41c50c0f-6cef-4bc2-bec0-1815ab76cff6-kube-api-access-wcwgm\") on node \"crc\" DevicePath \"\"" Feb 18 00:50:42 crc kubenswrapper[4791]: I0218 00:50:42.034175 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41c50c0f-6cef-4bc2-bec0-1815ab76cff6-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 00:50:42 crc kubenswrapper[4791]: I0218 00:50:42.042570 4791 scope.go:117] "RemoveContainer" containerID="3bdb485a08371462b2017f98d921225608563ca7befebbe083a3fbdd68137dee" Feb 18 00:50:42 crc kubenswrapper[4791]: E0218 00:50:42.044487 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3bdb485a08371462b2017f98d921225608563ca7befebbe083a3fbdd68137dee\": container with ID starting with 3bdb485a08371462b2017f98d921225608563ca7befebbe083a3fbdd68137dee not found: ID does not exist" 
containerID="3bdb485a08371462b2017f98d921225608563ca7befebbe083a3fbdd68137dee" Feb 18 00:50:42 crc kubenswrapper[4791]: I0218 00:50:42.044525 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3bdb485a08371462b2017f98d921225608563ca7befebbe083a3fbdd68137dee"} err="failed to get container status \"3bdb485a08371462b2017f98d921225608563ca7befebbe083a3fbdd68137dee\": rpc error: code = NotFound desc = could not find container \"3bdb485a08371462b2017f98d921225608563ca7befebbe083a3fbdd68137dee\": container with ID starting with 3bdb485a08371462b2017f98d921225608563ca7befebbe083a3fbdd68137dee not found: ID does not exist" Feb 18 00:50:42 crc kubenswrapper[4791]: I0218 00:50:42.044551 4791 scope.go:117] "RemoveContainer" containerID="9ed5e5536aeb04cfabf74337be6b1a0873bdfd7452da09088b4c47f3db660dc4" Feb 18 00:50:42 crc kubenswrapper[4791]: E0218 00:50:42.044888 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ed5e5536aeb04cfabf74337be6b1a0873bdfd7452da09088b4c47f3db660dc4\": container with ID starting with 9ed5e5536aeb04cfabf74337be6b1a0873bdfd7452da09088b4c47f3db660dc4 not found: ID does not exist" containerID="9ed5e5536aeb04cfabf74337be6b1a0873bdfd7452da09088b4c47f3db660dc4" Feb 18 00:50:42 crc kubenswrapper[4791]: I0218 00:50:42.044927 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ed5e5536aeb04cfabf74337be6b1a0873bdfd7452da09088b4c47f3db660dc4"} err="failed to get container status \"9ed5e5536aeb04cfabf74337be6b1a0873bdfd7452da09088b4c47f3db660dc4\": rpc error: code = NotFound desc = could not find container \"9ed5e5536aeb04cfabf74337be6b1a0873bdfd7452da09088b4c47f3db660dc4\": container with ID starting with 9ed5e5536aeb04cfabf74337be6b1a0873bdfd7452da09088b4c47f3db660dc4 not found: ID does not exist" Feb 18 00:50:42 crc kubenswrapper[4791]: I0218 00:50:42.044957 4791 scope.go:117] "RemoveContainer" containerID="e3769e45ad282cb4728f40b8caa857fc030bdcee451188a2b350b5c0c777c300" Feb 18 00:50:42 crc kubenswrapper[4791]: E0218 00:50:42.045292 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e3769e45ad282cb4728f40b8caa857fc030bdcee451188a2b350b5c0c777c300\": container with ID starting with e3769e45ad282cb4728f40b8caa857fc030bdcee451188a2b350b5c0c777c300 not found: ID does not exist" containerID="e3769e45ad282cb4728f40b8caa857fc030bdcee451188a2b350b5c0c777c300" Feb 18 00:50:42 crc kubenswrapper[4791]: I0218 00:50:42.045317 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e3769e45ad282cb4728f40b8caa857fc030bdcee451188a2b350b5c0c777c300"} err="failed to get container status \"e3769e45ad282cb4728f40b8caa857fc030bdcee451188a2b350b5c0c777c300\": rpc error: code = NotFound desc = could not find container \"e3769e45ad282cb4728f40b8caa857fc030bdcee451188a2b350b5c0c777c300\": container with ID starting with e3769e45ad282cb4728f40b8caa857fc030bdcee451188a2b350b5c0c777c300 not found: ID does not exist" Feb 18 00:50:42 crc kubenswrapper[4791]: I0218 00:50:42.274450 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bwg2b"] Feb 18 00:50:42 crc kubenswrapper[4791]: I0218 00:50:42.284475 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-bwg2b"] Feb 18 00:50:43 crc kubenswrapper[4791]: I0218 00:50:43.073670 
4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="41c50c0f-6cef-4bc2-bec0-1815ab76cff6" path="/var/lib/kubelet/pods/41c50c0f-6cef-4bc2-bec0-1815ab76cff6/volumes" Feb 18 00:50:45 crc kubenswrapper[4791]: I0218 00:50:45.422001 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-cgbkj" Feb 18 00:50:45 crc kubenswrapper[4791]: I0218 00:50:45.422442 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-cgbkj" Feb 18 00:50:45 crc kubenswrapper[4791]: I0218 00:50:45.475591 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-cgbkj" Feb 18 00:50:46 crc kubenswrapper[4791]: I0218 00:50:46.051002 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-cgbkj" Feb 18 00:50:48 crc kubenswrapper[4791]: I0218 00:50:48.780701 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f49d7b5fb-z9n6g" podUID="4c121c1e-022d-48bd-9be0-e7c01c754103" containerName="console" containerID="cri-o://7873414ea46b713a0829501d65713404546fc17722156a16354399ab10c40cfe" gracePeriod=15 Feb 18 00:50:48 crc kubenswrapper[4791]: I0218 00:50:48.882890 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-cgbkj"] Feb 18 00:50:48 crc kubenswrapper[4791]: I0218 00:50:48.883280 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-cgbkj" podUID="11bc655d-7634-4eb5-b30d-b3a5b45088a8" containerName="registry-server" containerID="cri-o://26dcfd57c538f0c9b3d5b808a47edf73e5d9b1d6c2549a8f9d31eaa585f364d5" gracePeriod=2 Feb 18 00:50:49 crc kubenswrapper[4791]: I0218 00:50:49.670025 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f49d7b5fb-z9n6g_4c121c1e-022d-48bd-9be0-e7c01c754103/console/0.log" Feb 18 00:50:49 crc kubenswrapper[4791]: I0218 00:50:49.670720 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f49d7b5fb-z9n6g" Feb 18 00:50:49 crc kubenswrapper[4791]: I0218 00:50:49.776493 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ld7h8\" (UniqueName: \"kubernetes.io/projected/4c121c1e-022d-48bd-9be0-e7c01c754103-kube-api-access-ld7h8\") pod \"4c121c1e-022d-48bd-9be0-e7c01c754103\" (UID: \"4c121c1e-022d-48bd-9be0-e7c01c754103\") " Feb 18 00:50:49 crc kubenswrapper[4791]: I0218 00:50:49.776565 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4c121c1e-022d-48bd-9be0-e7c01c754103-service-ca\") pod \"4c121c1e-022d-48bd-9be0-e7c01c754103\" (UID: \"4c121c1e-022d-48bd-9be0-e7c01c754103\") " Feb 18 00:50:49 crc kubenswrapper[4791]: I0218 00:50:49.776613 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4c121c1e-022d-48bd-9be0-e7c01c754103-trusted-ca-bundle\") pod \"4c121c1e-022d-48bd-9be0-e7c01c754103\" (UID: \"4c121c1e-022d-48bd-9be0-e7c01c754103\") " Feb 18 00:50:49 crc kubenswrapper[4791]: I0218 00:50:49.776636 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4c121c1e-022d-48bd-9be0-e7c01c754103-oauth-serving-cert\") pod \"4c121c1e-022d-48bd-9be0-e7c01c754103\" (UID: \"4c121c1e-022d-48bd-9be0-e7c01c754103\") " Feb 18 00:50:49 crc kubenswrapper[4791]: I0218 00:50:49.776814 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4c121c1e-022d-48bd-9be0-e7c01c754103-console-oauth-config\") pod \"4c121c1e-022d-48bd-9be0-e7c01c754103\" (UID: \"4c121c1e-022d-48bd-9be0-e7c01c754103\") " Feb 18 00:50:49 crc kubenswrapper[4791]: I0218 00:50:49.776862 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4c121c1e-022d-48bd-9be0-e7c01c754103-console-serving-cert\") pod \"4c121c1e-022d-48bd-9be0-e7c01c754103\" (UID: \"4c121c1e-022d-48bd-9be0-e7c01c754103\") " Feb 18 00:50:49 crc kubenswrapper[4791]: I0218 00:50:49.776903 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4c121c1e-022d-48bd-9be0-e7c01c754103-console-config\") pod \"4c121c1e-022d-48bd-9be0-e7c01c754103\" (UID: \"4c121c1e-022d-48bd-9be0-e7c01c754103\") " Feb 18 00:50:49 crc kubenswrapper[4791]: I0218 00:50:49.777605 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c121c1e-022d-48bd-9be0-e7c01c754103-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "4c121c1e-022d-48bd-9be0-e7c01c754103" (UID: "4c121c1e-022d-48bd-9be0-e7c01c754103"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:50:49 crc kubenswrapper[4791]: I0218 00:50:49.777623 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c121c1e-022d-48bd-9be0-e7c01c754103-console-config" (OuterVolumeSpecName: "console-config") pod "4c121c1e-022d-48bd-9be0-e7c01c754103" (UID: "4c121c1e-022d-48bd-9be0-e7c01c754103"). InnerVolumeSpecName "console-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:50:49 crc kubenswrapper[4791]: I0218 00:50:49.777640 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c121c1e-022d-48bd-9be0-e7c01c754103-service-ca" (OuterVolumeSpecName: "service-ca") pod "4c121c1e-022d-48bd-9be0-e7c01c754103" (UID: "4c121c1e-022d-48bd-9be0-e7c01c754103"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:50:49 crc kubenswrapper[4791]: I0218 00:50:49.778006 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c121c1e-022d-48bd-9be0-e7c01c754103-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "4c121c1e-022d-48bd-9be0-e7c01c754103" (UID: "4c121c1e-022d-48bd-9be0-e7c01c754103"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:50:49 crc kubenswrapper[4791]: I0218 00:50:49.812213 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c121c1e-022d-48bd-9be0-e7c01c754103-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "4c121c1e-022d-48bd-9be0-e7c01c754103" (UID: "4c121c1e-022d-48bd-9be0-e7c01c754103"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:50:49 crc kubenswrapper[4791]: I0218 00:50:49.813563 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c121c1e-022d-48bd-9be0-e7c01c754103-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "4c121c1e-022d-48bd-9be0-e7c01c754103" (UID: "4c121c1e-022d-48bd-9be0-e7c01c754103"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:50:49 crc kubenswrapper[4791]: I0218 00:50:49.816565 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c121c1e-022d-48bd-9be0-e7c01c754103-kube-api-access-ld7h8" (OuterVolumeSpecName: "kube-api-access-ld7h8") pod "4c121c1e-022d-48bd-9be0-e7c01c754103" (UID: "4c121c1e-022d-48bd-9be0-e7c01c754103"). InnerVolumeSpecName "kube-api-access-ld7h8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:50:49 crc kubenswrapper[4791]: I0218 00:50:49.878704 4791 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4c121c1e-022d-48bd-9be0-e7c01c754103-console-oauth-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:50:49 crc kubenswrapper[4791]: I0218 00:50:49.878812 4791 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4c121c1e-022d-48bd-9be0-e7c01c754103-console-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:50:49 crc kubenswrapper[4791]: I0218 00:50:49.878839 4791 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4c121c1e-022d-48bd-9be0-e7c01c754103-console-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:50:49 crc kubenswrapper[4791]: I0218 00:50:49.878854 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ld7h8\" (UniqueName: \"kubernetes.io/projected/4c121c1e-022d-48bd-9be0-e7c01c754103-kube-api-access-ld7h8\") on node \"crc\" DevicePath \"\"" Feb 18 00:50:49 crc kubenswrapper[4791]: I0218 00:50:49.878891 4791 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4c121c1e-022d-48bd-9be0-e7c01c754103-service-ca\") on node \"crc\" DevicePath \"\"" Feb 18 00:50:49 crc kubenswrapper[4791]: I0218 00:50:49.878905 4791 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4c121c1e-022d-48bd-9be0-e7c01c754103-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:50:49 crc kubenswrapper[4791]: I0218 00:50:49.878917 4791 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4c121c1e-022d-48bd-9be0-e7c01c754103-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:50:50 crc kubenswrapper[4791]: I0218 00:50:50.024596 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f49d7b5fb-z9n6g_4c121c1e-022d-48bd-9be0-e7c01c754103/console/0.log" Feb 18 00:50:50 crc kubenswrapper[4791]: I0218 00:50:50.024987 4791 generic.go:334] "Generic (PLEG): container finished" podID="4c121c1e-022d-48bd-9be0-e7c01c754103" containerID="7873414ea46b713a0829501d65713404546fc17722156a16354399ab10c40cfe" exitCode=2 Feb 18 00:50:50 crc kubenswrapper[4791]: I0218 00:50:50.025063 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f49d7b5fb-z9n6g" Feb 18 00:50:50 crc kubenswrapper[4791]: I0218 00:50:50.025099 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f49d7b5fb-z9n6g" event={"ID":"4c121c1e-022d-48bd-9be0-e7c01c754103","Type":"ContainerDied","Data":"7873414ea46b713a0829501d65713404546fc17722156a16354399ab10c40cfe"} Feb 18 00:50:50 crc kubenswrapper[4791]: I0218 00:50:50.025169 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f49d7b5fb-z9n6g" event={"ID":"4c121c1e-022d-48bd-9be0-e7c01c754103","Type":"ContainerDied","Data":"d403c292f512e38c38f0fdceb7d6afaf3b42a8afffeedeb360a809eb1516edf9"} Feb 18 00:50:50 crc kubenswrapper[4791]: I0218 00:50:50.025187 4791 scope.go:117] "RemoveContainer" containerID="7873414ea46b713a0829501d65713404546fc17722156a16354399ab10c40cfe" Feb 18 00:50:50 crc kubenswrapper[4791]: I0218 00:50:50.027724 4791 generic.go:334] "Generic (PLEG): container finished" podID="11bc655d-7634-4eb5-b30d-b3a5b45088a8" containerID="26dcfd57c538f0c9b3d5b808a47edf73e5d9b1d6c2549a8f9d31eaa585f364d5" exitCode=0 Feb 18 00:50:50 crc kubenswrapper[4791]: I0218 00:50:50.027759 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cgbkj" event={"ID":"11bc655d-7634-4eb5-b30d-b3a5b45088a8","Type":"ContainerDied","Data":"26dcfd57c538f0c9b3d5b808a47edf73e5d9b1d6c2549a8f9d31eaa585f364d5"} Feb 18 00:50:50 crc kubenswrapper[4791]: I0218 00:50:50.027781 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cgbkj" event={"ID":"11bc655d-7634-4eb5-b30d-b3a5b45088a8","Type":"ContainerDied","Data":"16d672e2fffdf66292085134d2453397b40a59dd96277ce4f88de0416a19a65b"} Feb 18 00:50:50 crc kubenswrapper[4791]: I0218 00:50:50.027795 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="16d672e2fffdf66292085134d2453397b40a59dd96277ce4f88de0416a19a65b" Feb 18 00:50:50 crc kubenswrapper[4791]: I0218 00:50:50.056809 4791 scope.go:117] "RemoveContainer" containerID="7873414ea46b713a0829501d65713404546fc17722156a16354399ab10c40cfe" Feb 18 00:50:50 crc kubenswrapper[4791]: E0218 00:50:50.057299 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7873414ea46b713a0829501d65713404546fc17722156a16354399ab10c40cfe\": container with ID starting with 7873414ea46b713a0829501d65713404546fc17722156a16354399ab10c40cfe not found: ID does not exist" containerID="7873414ea46b713a0829501d65713404546fc17722156a16354399ab10c40cfe" Feb 18 00:50:50 crc kubenswrapper[4791]: I0218 00:50:50.057352 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7873414ea46b713a0829501d65713404546fc17722156a16354399ab10c40cfe"} err="failed to get container status \"7873414ea46b713a0829501d65713404546fc17722156a16354399ab10c40cfe\": rpc error: code = NotFound desc = could not find container \"7873414ea46b713a0829501d65713404546fc17722156a16354399ab10c40cfe\": container with ID starting with 7873414ea46b713a0829501d65713404546fc17722156a16354399ab10c40cfe not found: ID does not exist" Feb 18 00:50:50 crc kubenswrapper[4791]: I0218 00:50:50.059483 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-cgbkj" Feb 18 00:50:50 crc kubenswrapper[4791]: I0218 00:50:50.081466 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/11bc655d-7634-4eb5-b30d-b3a5b45088a8-utilities\") pod \"11bc655d-7634-4eb5-b30d-b3a5b45088a8\" (UID: \"11bc655d-7634-4eb5-b30d-b3a5b45088a8\") " Feb 18 00:50:50 crc kubenswrapper[4791]: I0218 00:50:50.081504 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/11bc655d-7634-4eb5-b30d-b3a5b45088a8-catalog-content\") pod \"11bc655d-7634-4eb5-b30d-b3a5b45088a8\" (UID: \"11bc655d-7634-4eb5-b30d-b3a5b45088a8\") " Feb 18 00:50:50 crc kubenswrapper[4791]: I0218 00:50:50.081632 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cl24m\" (UniqueName: \"kubernetes.io/projected/11bc655d-7634-4eb5-b30d-b3a5b45088a8-kube-api-access-cl24m\") pod \"11bc655d-7634-4eb5-b30d-b3a5b45088a8\" (UID: \"11bc655d-7634-4eb5-b30d-b3a5b45088a8\") " Feb 18 00:50:50 crc kubenswrapper[4791]: I0218 00:50:50.081749 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f49d7b5fb-z9n6g"] Feb 18 00:50:50 crc kubenswrapper[4791]: I0218 00:50:50.082615 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/11bc655d-7634-4eb5-b30d-b3a5b45088a8-utilities" (OuterVolumeSpecName: "utilities") pod "11bc655d-7634-4eb5-b30d-b3a5b45088a8" (UID: "11bc655d-7634-4eb5-b30d-b3a5b45088a8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:50:50 crc kubenswrapper[4791]: I0218 00:50:50.082915 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/11bc655d-7634-4eb5-b30d-b3a5b45088a8-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 00:50:50 crc kubenswrapper[4791]: I0218 00:50:50.093771 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f49d7b5fb-z9n6g"] Feb 18 00:50:50 crc kubenswrapper[4791]: I0218 00:50:50.095299 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/11bc655d-7634-4eb5-b30d-b3a5b45088a8-kube-api-access-cl24m" (OuterVolumeSpecName: "kube-api-access-cl24m") pod "11bc655d-7634-4eb5-b30d-b3a5b45088a8" (UID: "11bc655d-7634-4eb5-b30d-b3a5b45088a8"). InnerVolumeSpecName "kube-api-access-cl24m". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:50:50 crc kubenswrapper[4791]: I0218 00:50:50.136311 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/11bc655d-7634-4eb5-b30d-b3a5b45088a8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "11bc655d-7634-4eb5-b30d-b3a5b45088a8" (UID: "11bc655d-7634-4eb5-b30d-b3a5b45088a8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:50:50 crc kubenswrapper[4791]: I0218 00:50:50.184762 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cl24m\" (UniqueName: \"kubernetes.io/projected/11bc655d-7634-4eb5-b30d-b3a5b45088a8-kube-api-access-cl24m\") on node \"crc\" DevicePath \"\"" Feb 18 00:50:50 crc kubenswrapper[4791]: I0218 00:50:50.184794 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/11bc655d-7634-4eb5-b30d-b3a5b45088a8-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 00:50:51 crc kubenswrapper[4791]: I0218 00:50:51.048550 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cgbkj" Feb 18 00:50:51 crc kubenswrapper[4791]: I0218 00:50:51.087915 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c121c1e-022d-48bd-9be0-e7c01c754103" path="/var/lib/kubelet/pods/4c121c1e-022d-48bd-9be0-e7c01c754103/volumes" Feb 18 00:50:51 crc kubenswrapper[4791]: I0218 00:50:51.098739 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-cgbkj"] Feb 18 00:50:51 crc kubenswrapper[4791]: I0218 00:50:51.109524 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-cgbkj"] Feb 18 00:50:52 crc kubenswrapper[4791]: I0218 00:50:52.921101 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213w5vk2"] Feb 18 00:50:52 crc kubenswrapper[4791]: E0218 00:50:52.921670 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11bc655d-7634-4eb5-b30d-b3a5b45088a8" containerName="extract-utilities" Feb 18 00:50:52 crc kubenswrapper[4791]: I0218 00:50:52.921681 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="11bc655d-7634-4eb5-b30d-b3a5b45088a8" containerName="extract-utilities" Feb 18 00:50:52 crc kubenswrapper[4791]: E0218 00:50:52.921696 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11bc655d-7634-4eb5-b30d-b3a5b45088a8" containerName="extract-content" Feb 18 00:50:52 crc kubenswrapper[4791]: I0218 00:50:52.921702 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="11bc655d-7634-4eb5-b30d-b3a5b45088a8" containerName="extract-content" Feb 18 00:50:52 crc kubenswrapper[4791]: E0218 00:50:52.921712 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41c50c0f-6cef-4bc2-bec0-1815ab76cff6" containerName="extract-utilities" Feb 18 00:50:52 crc kubenswrapper[4791]: I0218 00:50:52.921718 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="41c50c0f-6cef-4bc2-bec0-1815ab76cff6" containerName="extract-utilities" Feb 18 00:50:52 crc kubenswrapper[4791]: E0218 00:50:52.921734 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41c50c0f-6cef-4bc2-bec0-1815ab76cff6" containerName="extract-content" Feb 18 00:50:52 crc kubenswrapper[4791]: I0218 00:50:52.921740 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="41c50c0f-6cef-4bc2-bec0-1815ab76cff6" containerName="extract-content" Feb 18 00:50:52 crc kubenswrapper[4791]: E0218 00:50:52.921751 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41c50c0f-6cef-4bc2-bec0-1815ab76cff6" containerName="registry-server" Feb 18 00:50:52 crc kubenswrapper[4791]: I0218 00:50:52.921758 4791 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="41c50c0f-6cef-4bc2-bec0-1815ab76cff6" containerName="registry-server" Feb 18 00:50:52 crc kubenswrapper[4791]: E0218 00:50:52.921770 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11bc655d-7634-4eb5-b30d-b3a5b45088a8" containerName="registry-server" Feb 18 00:50:52 crc kubenswrapper[4791]: I0218 00:50:52.921775 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="11bc655d-7634-4eb5-b30d-b3a5b45088a8" containerName="registry-server" Feb 18 00:50:52 crc kubenswrapper[4791]: E0218 00:50:52.921785 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c121c1e-022d-48bd-9be0-e7c01c754103" containerName="console" Feb 18 00:50:52 crc kubenswrapper[4791]: I0218 00:50:52.921791 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c121c1e-022d-48bd-9be0-e7c01c754103" containerName="console" Feb 18 00:50:52 crc kubenswrapper[4791]: I0218 00:50:52.921917 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="11bc655d-7634-4eb5-b30d-b3a5b45088a8" containerName="registry-server" Feb 18 00:50:52 crc kubenswrapper[4791]: I0218 00:50:52.921927 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="41c50c0f-6cef-4bc2-bec0-1815ab76cff6" containerName="registry-server" Feb 18 00:50:52 crc kubenswrapper[4791]: I0218 00:50:52.921937 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c121c1e-022d-48bd-9be0-e7c01c754103" containerName="console" Feb 18 00:50:52 crc kubenswrapper[4791]: I0218 00:50:52.922939 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213w5vk2" Feb 18 00:50:52 crc kubenswrapper[4791]: I0218 00:50:52.924789 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Feb 18 00:50:52 crc kubenswrapper[4791]: I0218 00:50:52.929957 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213w5vk2"] Feb 18 00:50:53 crc kubenswrapper[4791]: I0218 00:50:53.025624 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/40f169fa-a16d-48fd-aca9-881183df5077-util\") pod \"a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213w5vk2\" (UID: \"40f169fa-a16d-48fd-aca9-881183df5077\") " pod="openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213w5vk2" Feb 18 00:50:53 crc kubenswrapper[4791]: I0218 00:50:53.025706 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/40f169fa-a16d-48fd-aca9-881183df5077-bundle\") pod \"a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213w5vk2\" (UID: \"40f169fa-a16d-48fd-aca9-881183df5077\") " pod="openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213w5vk2" Feb 18 00:50:53 crc kubenswrapper[4791]: I0218 00:50:53.025894 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nvpvr\" (UniqueName: \"kubernetes.io/projected/40f169fa-a16d-48fd-aca9-881183df5077-kube-api-access-nvpvr\") pod \"a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213w5vk2\" (UID: \"40f169fa-a16d-48fd-aca9-881183df5077\") " pod="openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213w5vk2" Feb 18 00:50:53 crc 
kubenswrapper[4791]: I0218 00:50:53.072278 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="11bc655d-7634-4eb5-b30d-b3a5b45088a8" path="/var/lib/kubelet/pods/11bc655d-7634-4eb5-b30d-b3a5b45088a8/volumes" Feb 18 00:50:53 crc kubenswrapper[4791]: I0218 00:50:53.127263 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nvpvr\" (UniqueName: \"kubernetes.io/projected/40f169fa-a16d-48fd-aca9-881183df5077-kube-api-access-nvpvr\") pod \"a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213w5vk2\" (UID: \"40f169fa-a16d-48fd-aca9-881183df5077\") " pod="openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213w5vk2" Feb 18 00:50:53 crc kubenswrapper[4791]: I0218 00:50:53.128402 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/40f169fa-a16d-48fd-aca9-881183df5077-util\") pod \"a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213w5vk2\" (UID: \"40f169fa-a16d-48fd-aca9-881183df5077\") " pod="openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213w5vk2" Feb 18 00:50:53 crc kubenswrapper[4791]: I0218 00:50:53.128900 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/40f169fa-a16d-48fd-aca9-881183df5077-util\") pod \"a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213w5vk2\" (UID: \"40f169fa-a16d-48fd-aca9-881183df5077\") " pod="openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213w5vk2" Feb 18 00:50:53 crc kubenswrapper[4791]: I0218 00:50:53.129074 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/40f169fa-a16d-48fd-aca9-881183df5077-bundle\") pod \"a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213w5vk2\" (UID: \"40f169fa-a16d-48fd-aca9-881183df5077\") " pod="openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213w5vk2" Feb 18 00:50:53 crc kubenswrapper[4791]: I0218 00:50:53.129491 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/40f169fa-a16d-48fd-aca9-881183df5077-bundle\") pod \"a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213w5vk2\" (UID: \"40f169fa-a16d-48fd-aca9-881183df5077\") " pod="openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213w5vk2" Feb 18 00:50:53 crc kubenswrapper[4791]: I0218 00:50:53.158331 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nvpvr\" (UniqueName: \"kubernetes.io/projected/40f169fa-a16d-48fd-aca9-881183df5077-kube-api-access-nvpvr\") pod \"a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213w5vk2\" (UID: \"40f169fa-a16d-48fd-aca9-881183df5077\") " pod="openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213w5vk2" Feb 18 00:50:53 crc kubenswrapper[4791]: I0218 00:50:53.250848 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213w5vk2" Feb 18 00:50:53 crc kubenswrapper[4791]: I0218 00:50:53.730844 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213w5vk2"] Feb 18 00:50:54 crc kubenswrapper[4791]: I0218 00:50:54.067886 4791 generic.go:334] "Generic (PLEG): container finished" podID="40f169fa-a16d-48fd-aca9-881183df5077" containerID="cd5f9e1229da7d4970faa0d78d4fd93931afb3ee9612a8d13566e9e84feae439" exitCode=0 Feb 18 00:50:54 crc kubenswrapper[4791]: I0218 00:50:54.068081 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213w5vk2" event={"ID":"40f169fa-a16d-48fd-aca9-881183df5077","Type":"ContainerDied","Data":"cd5f9e1229da7d4970faa0d78d4fd93931afb3ee9612a8d13566e9e84feae439"} Feb 18 00:50:54 crc kubenswrapper[4791]: I0218 00:50:54.068211 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213w5vk2" event={"ID":"40f169fa-a16d-48fd-aca9-881183df5077","Type":"ContainerStarted","Data":"6a32f43da9116e15a76df15d5be3c6075870b0a3e1635a065f1c0da32a187b1a"} Feb 18 00:50:56 crc kubenswrapper[4791]: I0218 00:50:56.088098 4791 generic.go:334] "Generic (PLEG): container finished" podID="40f169fa-a16d-48fd-aca9-881183df5077" containerID="f298d891208b9673eb3e34091916968a8665bdba62f6de3a0f3e9f0ff7ab2ed1" exitCode=0 Feb 18 00:50:56 crc kubenswrapper[4791]: I0218 00:50:56.088133 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213w5vk2" event={"ID":"40f169fa-a16d-48fd-aca9-881183df5077","Type":"ContainerDied","Data":"f298d891208b9673eb3e34091916968a8665bdba62f6de3a0f3e9f0ff7ab2ed1"} Feb 18 00:50:56 crc kubenswrapper[4791]: I0218 00:50:56.799723 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 00:50:56 crc kubenswrapper[4791]: I0218 00:50:56.800057 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 00:50:57 crc kubenswrapper[4791]: I0218 00:50:57.101814 4791 generic.go:334] "Generic (PLEG): container finished" podID="40f169fa-a16d-48fd-aca9-881183df5077" containerID="8dd45d4c22a31406e88bf929799783729ebe1ba8f75484d20d574a2cdce9b64f" exitCode=0 Feb 18 00:50:57 crc kubenswrapper[4791]: I0218 00:50:57.101881 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213w5vk2" event={"ID":"40f169fa-a16d-48fd-aca9-881183df5077","Type":"ContainerDied","Data":"8dd45d4c22a31406e88bf929799783729ebe1ba8f75484d20d574a2cdce9b64f"} Feb 18 00:50:58 crc kubenswrapper[4791]: I0218 00:50:58.387728 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213w5vk2" Feb 18 00:50:58 crc kubenswrapper[4791]: I0218 00:50:58.411226 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nvpvr\" (UniqueName: \"kubernetes.io/projected/40f169fa-a16d-48fd-aca9-881183df5077-kube-api-access-nvpvr\") pod \"40f169fa-a16d-48fd-aca9-881183df5077\" (UID: \"40f169fa-a16d-48fd-aca9-881183df5077\") " Feb 18 00:50:58 crc kubenswrapper[4791]: I0218 00:50:58.411408 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/40f169fa-a16d-48fd-aca9-881183df5077-bundle\") pod \"40f169fa-a16d-48fd-aca9-881183df5077\" (UID: \"40f169fa-a16d-48fd-aca9-881183df5077\") " Feb 18 00:50:58 crc kubenswrapper[4791]: I0218 00:50:58.411578 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/40f169fa-a16d-48fd-aca9-881183df5077-util\") pod \"40f169fa-a16d-48fd-aca9-881183df5077\" (UID: \"40f169fa-a16d-48fd-aca9-881183df5077\") " Feb 18 00:50:58 crc kubenswrapper[4791]: I0218 00:50:58.412370 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/40f169fa-a16d-48fd-aca9-881183df5077-bundle" (OuterVolumeSpecName: "bundle") pod "40f169fa-a16d-48fd-aca9-881183df5077" (UID: "40f169fa-a16d-48fd-aca9-881183df5077"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:50:58 crc kubenswrapper[4791]: I0218 00:50:58.419377 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40f169fa-a16d-48fd-aca9-881183df5077-kube-api-access-nvpvr" (OuterVolumeSpecName: "kube-api-access-nvpvr") pod "40f169fa-a16d-48fd-aca9-881183df5077" (UID: "40f169fa-a16d-48fd-aca9-881183df5077"). InnerVolumeSpecName "kube-api-access-nvpvr". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:50:58 crc kubenswrapper[4791]: I0218 00:50:58.425104 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/40f169fa-a16d-48fd-aca9-881183df5077-util" (OuterVolumeSpecName: "util") pod "40f169fa-a16d-48fd-aca9-881183df5077" (UID: "40f169fa-a16d-48fd-aca9-881183df5077"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:50:58 crc kubenswrapper[4791]: I0218 00:50:58.513295 4791 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/40f169fa-a16d-48fd-aca9-881183df5077-util\") on node \"crc\" DevicePath \"\"" Feb 18 00:50:58 crc kubenswrapper[4791]: I0218 00:50:58.513340 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nvpvr\" (UniqueName: \"kubernetes.io/projected/40f169fa-a16d-48fd-aca9-881183df5077-kube-api-access-nvpvr\") on node \"crc\" DevicePath \"\"" Feb 18 00:50:58 crc kubenswrapper[4791]: I0218 00:50:58.513360 4791 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/40f169fa-a16d-48fd-aca9-881183df5077-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:50:59 crc kubenswrapper[4791]: I0218 00:50:59.116082 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213w5vk2" event={"ID":"40f169fa-a16d-48fd-aca9-881183df5077","Type":"ContainerDied","Data":"6a32f43da9116e15a76df15d5be3c6075870b0a3e1635a065f1c0da32a187b1a"} Feb 18 00:50:59 crc kubenswrapper[4791]: I0218 00:50:59.116124 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6a32f43da9116e15a76df15d5be3c6075870b0a3e1635a065f1c0da32a187b1a" Feb 18 00:50:59 crc kubenswrapper[4791]: I0218 00:50:59.116225 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213w5vk2" Feb 18 00:51:12 crc kubenswrapper[4791]: I0218 00:51:12.582367 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-f9fdbc69b-szb26"] Feb 18 00:51:12 crc kubenswrapper[4791]: E0218 00:51:12.583001 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40f169fa-a16d-48fd-aca9-881183df5077" containerName="pull" Feb 18 00:51:12 crc kubenswrapper[4791]: I0218 00:51:12.583011 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="40f169fa-a16d-48fd-aca9-881183df5077" containerName="pull" Feb 18 00:51:12 crc kubenswrapper[4791]: E0218 00:51:12.583026 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40f169fa-a16d-48fd-aca9-881183df5077" containerName="util" Feb 18 00:51:12 crc kubenswrapper[4791]: I0218 00:51:12.583032 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="40f169fa-a16d-48fd-aca9-881183df5077" containerName="util" Feb 18 00:51:12 crc kubenswrapper[4791]: E0218 00:51:12.583057 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40f169fa-a16d-48fd-aca9-881183df5077" containerName="extract" Feb 18 00:51:12 crc kubenswrapper[4791]: I0218 00:51:12.583079 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="40f169fa-a16d-48fd-aca9-881183df5077" containerName="extract" Feb 18 00:51:12 crc kubenswrapper[4791]: I0218 00:51:12.583204 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="40f169fa-a16d-48fd-aca9-881183df5077" containerName="extract" Feb 18 00:51:12 crc kubenswrapper[4791]: I0218 00:51:12.583686 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-f9fdbc69b-szb26" Feb 18 00:51:12 crc kubenswrapper[4791]: I0218 00:51:12.586327 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Feb 18 00:51:12 crc kubenswrapper[4791]: I0218 00:51:12.586508 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Feb 18 00:51:12 crc kubenswrapper[4791]: I0218 00:51:12.586812 4791 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Feb 18 00:51:12 crc kubenswrapper[4791]: I0218 00:51:12.586939 4791 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Feb 18 00:51:12 crc kubenswrapper[4791]: I0218 00:51:12.587026 4791 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-m95kz" Feb 18 00:51:12 crc kubenswrapper[4791]: I0218 00:51:12.603129 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-f9fdbc69b-szb26"] Feb 18 00:51:12 crc kubenswrapper[4791]: I0218 00:51:12.630077 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5lns7\" (UniqueName: \"kubernetes.io/projected/1d334193-b713-438c-a889-9a58c82e980d-kube-api-access-5lns7\") pod \"metallb-operator-controller-manager-f9fdbc69b-szb26\" (UID: \"1d334193-b713-438c-a889-9a58c82e980d\") " pod="metallb-system/metallb-operator-controller-manager-f9fdbc69b-szb26" Feb 18 00:51:12 crc kubenswrapper[4791]: I0218 00:51:12.630441 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1d334193-b713-438c-a889-9a58c82e980d-webhook-cert\") pod \"metallb-operator-controller-manager-f9fdbc69b-szb26\" (UID: \"1d334193-b713-438c-a889-9a58c82e980d\") " pod="metallb-system/metallb-operator-controller-manager-f9fdbc69b-szb26" Feb 18 00:51:12 crc kubenswrapper[4791]: I0218 00:51:12.630601 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1d334193-b713-438c-a889-9a58c82e980d-apiservice-cert\") pod \"metallb-operator-controller-manager-f9fdbc69b-szb26\" (UID: \"1d334193-b713-438c-a889-9a58c82e980d\") " pod="metallb-system/metallb-operator-controller-manager-f9fdbc69b-szb26" Feb 18 00:51:12 crc kubenswrapper[4791]: I0218 00:51:12.732222 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1d334193-b713-438c-a889-9a58c82e980d-webhook-cert\") pod \"metallb-operator-controller-manager-f9fdbc69b-szb26\" (UID: \"1d334193-b713-438c-a889-9a58c82e980d\") " pod="metallb-system/metallb-operator-controller-manager-f9fdbc69b-szb26" Feb 18 00:51:12 crc kubenswrapper[4791]: I0218 00:51:12.732292 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1d334193-b713-438c-a889-9a58c82e980d-apiservice-cert\") pod \"metallb-operator-controller-manager-f9fdbc69b-szb26\" (UID: \"1d334193-b713-438c-a889-9a58c82e980d\") " pod="metallb-system/metallb-operator-controller-manager-f9fdbc69b-szb26" Feb 18 00:51:12 crc kubenswrapper[4791]: I0218 00:51:12.732379 4791 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-5lns7\" (UniqueName: \"kubernetes.io/projected/1d334193-b713-438c-a889-9a58c82e980d-kube-api-access-5lns7\") pod \"metallb-operator-controller-manager-f9fdbc69b-szb26\" (UID: \"1d334193-b713-438c-a889-9a58c82e980d\") " pod="metallb-system/metallb-operator-controller-manager-f9fdbc69b-szb26" Feb 18 00:51:12 crc kubenswrapper[4791]: I0218 00:51:12.750018 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1d334193-b713-438c-a889-9a58c82e980d-apiservice-cert\") pod \"metallb-operator-controller-manager-f9fdbc69b-szb26\" (UID: \"1d334193-b713-438c-a889-9a58c82e980d\") " pod="metallb-system/metallb-operator-controller-manager-f9fdbc69b-szb26" Feb 18 00:51:12 crc kubenswrapper[4791]: I0218 00:51:12.750057 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1d334193-b713-438c-a889-9a58c82e980d-webhook-cert\") pod \"metallb-operator-controller-manager-f9fdbc69b-szb26\" (UID: \"1d334193-b713-438c-a889-9a58c82e980d\") " pod="metallb-system/metallb-operator-controller-manager-f9fdbc69b-szb26" Feb 18 00:51:12 crc kubenswrapper[4791]: I0218 00:51:12.754695 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5lns7\" (UniqueName: \"kubernetes.io/projected/1d334193-b713-438c-a889-9a58c82e980d-kube-api-access-5lns7\") pod \"metallb-operator-controller-manager-f9fdbc69b-szb26\" (UID: \"1d334193-b713-438c-a889-9a58c82e980d\") " pod="metallb-system/metallb-operator-controller-manager-f9fdbc69b-szb26" Feb 18 00:51:12 crc kubenswrapper[4791]: I0218 00:51:12.815404 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-67d9bcb746-9bq8h"] Feb 18 00:51:12 crc kubenswrapper[4791]: I0218 00:51:12.816318 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-67d9bcb746-9bq8h" Feb 18 00:51:12 crc kubenswrapper[4791]: I0218 00:51:12.818220 4791 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Feb 18 00:51:12 crc kubenswrapper[4791]: I0218 00:51:12.818322 4791 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Feb 18 00:51:12 crc kubenswrapper[4791]: I0218 00:51:12.818522 4791 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-zmnc2" Feb 18 00:51:12 crc kubenswrapper[4791]: I0218 00:51:12.833995 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqdqg\" (UniqueName: \"kubernetes.io/projected/e884fd03-f3e4-4a90-833a-af7afbf80be3-kube-api-access-gqdqg\") pod \"metallb-operator-webhook-server-67d9bcb746-9bq8h\" (UID: \"e884fd03-f3e4-4a90-833a-af7afbf80be3\") " pod="metallb-system/metallb-operator-webhook-server-67d9bcb746-9bq8h" Feb 18 00:51:12 crc kubenswrapper[4791]: I0218 00:51:12.834084 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/e884fd03-f3e4-4a90-833a-af7afbf80be3-webhook-cert\") pod \"metallb-operator-webhook-server-67d9bcb746-9bq8h\" (UID: \"e884fd03-f3e4-4a90-833a-af7afbf80be3\") " pod="metallb-system/metallb-operator-webhook-server-67d9bcb746-9bq8h" Feb 18 00:51:12 crc kubenswrapper[4791]: I0218 00:51:12.834145 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/e884fd03-f3e4-4a90-833a-af7afbf80be3-apiservice-cert\") pod \"metallb-operator-webhook-server-67d9bcb746-9bq8h\" (UID: \"e884fd03-f3e4-4a90-833a-af7afbf80be3\") " pod="metallb-system/metallb-operator-webhook-server-67d9bcb746-9bq8h" Feb 18 00:51:12 crc kubenswrapper[4791]: I0218 00:51:12.836281 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-67d9bcb746-9bq8h"] Feb 18 00:51:12 crc kubenswrapper[4791]: I0218 00:51:12.905065 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-f9fdbc69b-szb26" Feb 18 00:51:12 crc kubenswrapper[4791]: I0218 00:51:12.935118 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/e884fd03-f3e4-4a90-833a-af7afbf80be3-apiservice-cert\") pod \"metallb-operator-webhook-server-67d9bcb746-9bq8h\" (UID: \"e884fd03-f3e4-4a90-833a-af7afbf80be3\") " pod="metallb-system/metallb-operator-webhook-server-67d9bcb746-9bq8h" Feb 18 00:51:12 crc kubenswrapper[4791]: I0218 00:51:12.935189 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqdqg\" (UniqueName: \"kubernetes.io/projected/e884fd03-f3e4-4a90-833a-af7afbf80be3-kube-api-access-gqdqg\") pod \"metallb-operator-webhook-server-67d9bcb746-9bq8h\" (UID: \"e884fd03-f3e4-4a90-833a-af7afbf80be3\") " pod="metallb-system/metallb-operator-webhook-server-67d9bcb746-9bq8h" Feb 18 00:51:12 crc kubenswrapper[4791]: I0218 00:51:12.935256 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/e884fd03-f3e4-4a90-833a-af7afbf80be3-webhook-cert\") pod \"metallb-operator-webhook-server-67d9bcb746-9bq8h\" (UID: \"e884fd03-f3e4-4a90-833a-af7afbf80be3\") " pod="metallb-system/metallb-operator-webhook-server-67d9bcb746-9bq8h" Feb 18 00:51:12 crc kubenswrapper[4791]: I0218 00:51:12.939088 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/e884fd03-f3e4-4a90-833a-af7afbf80be3-apiservice-cert\") pod \"metallb-operator-webhook-server-67d9bcb746-9bq8h\" (UID: \"e884fd03-f3e4-4a90-833a-af7afbf80be3\") " pod="metallb-system/metallb-operator-webhook-server-67d9bcb746-9bq8h" Feb 18 00:51:12 crc kubenswrapper[4791]: I0218 00:51:12.939502 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/e884fd03-f3e4-4a90-833a-af7afbf80be3-webhook-cert\") pod \"metallb-operator-webhook-server-67d9bcb746-9bq8h\" (UID: \"e884fd03-f3e4-4a90-833a-af7afbf80be3\") " pod="metallb-system/metallb-operator-webhook-server-67d9bcb746-9bq8h" Feb 18 00:51:12 crc kubenswrapper[4791]: I0218 00:51:12.955914 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqdqg\" (UniqueName: \"kubernetes.io/projected/e884fd03-f3e4-4a90-833a-af7afbf80be3-kube-api-access-gqdqg\") pod \"metallb-operator-webhook-server-67d9bcb746-9bq8h\" (UID: \"e884fd03-f3e4-4a90-833a-af7afbf80be3\") " pod="metallb-system/metallb-operator-webhook-server-67d9bcb746-9bq8h" Feb 18 00:51:13 crc kubenswrapper[4791]: I0218 00:51:13.132722 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-67d9bcb746-9bq8h" Feb 18 00:51:13 crc kubenswrapper[4791]: I0218 00:51:13.381739 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-f9fdbc69b-szb26"] Feb 18 00:51:13 crc kubenswrapper[4791]: W0218 00:51:13.384502 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1d334193_b713_438c_a889_9a58c82e980d.slice/crio-8c3e1ee7181ab7dcbc286b4bc42be919c99b3d175fe7533abff8ff4da55ec4e3 WatchSource:0}: Error finding container 8c3e1ee7181ab7dcbc286b4bc42be919c99b3d175fe7533abff8ff4da55ec4e3: Status 404 returned error can't find the container with id 8c3e1ee7181ab7dcbc286b4bc42be919c99b3d175fe7533abff8ff4da55ec4e3 Feb 18 00:51:13 crc kubenswrapper[4791]: I0218 00:51:13.603901 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-67d9bcb746-9bq8h"] Feb 18 00:51:13 crc kubenswrapper[4791]: W0218 00:51:13.614046 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode884fd03_f3e4_4a90_833a_af7afbf80be3.slice/crio-ecc85c34704a4957e538868ce85c4749db1a160a59b6335b90d054b4351ddbaa WatchSource:0}: Error finding container ecc85c34704a4957e538868ce85c4749db1a160a59b6335b90d054b4351ddbaa: Status 404 returned error can't find the container with id ecc85c34704a4957e538868ce85c4749db1a160a59b6335b90d054b4351ddbaa Feb 18 00:51:14 crc kubenswrapper[4791]: I0218 00:51:14.243489 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-f9fdbc69b-szb26" event={"ID":"1d334193-b713-438c-a889-9a58c82e980d","Type":"ContainerStarted","Data":"8c3e1ee7181ab7dcbc286b4bc42be919c99b3d175fe7533abff8ff4da55ec4e3"} Feb 18 00:51:14 crc kubenswrapper[4791]: I0218 00:51:14.245430 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-67d9bcb746-9bq8h" event={"ID":"e884fd03-f3e4-4a90-833a-af7afbf80be3","Type":"ContainerStarted","Data":"ecc85c34704a4957e538868ce85c4749db1a160a59b6335b90d054b4351ddbaa"} Feb 18 00:51:20 crc kubenswrapper[4791]: I0218 00:51:20.304405 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-f9fdbc69b-szb26" event={"ID":"1d334193-b713-438c-a889-9a58c82e980d","Type":"ContainerStarted","Data":"1737a28b0303f4f9781b697bea4bf1d46e8d987b8135df22db4f936ae9b8e865"} Feb 18 00:51:20 crc kubenswrapper[4791]: I0218 00:51:20.304960 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-f9fdbc69b-szb26" Feb 18 00:51:20 crc kubenswrapper[4791]: I0218 00:51:20.307089 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-67d9bcb746-9bq8h" event={"ID":"e884fd03-f3e4-4a90-833a-af7afbf80be3","Type":"ContainerStarted","Data":"42c7e7010533b114f29892800a85ff3d607f2b071035eb9dd21efb1040df5489"} Feb 18 00:51:20 crc kubenswrapper[4791]: I0218 00:51:20.307791 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-67d9bcb746-9bq8h" Feb 18 00:51:20 crc kubenswrapper[4791]: I0218 00:51:20.334069 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-f9fdbc69b-szb26" 
podStartSLOduration=2.505607729 podStartE2EDuration="8.334053336s" podCreationTimestamp="2026-02-18 00:51:12 +0000 UTC" firstStartedPulling="2026-02-18 00:51:13.386572152 +0000 UTC m=+1014.954585312" lastFinishedPulling="2026-02-18 00:51:19.215017749 +0000 UTC m=+1020.783030919" observedRunningTime="2026-02-18 00:51:20.329361982 +0000 UTC m=+1021.897375152" watchObservedRunningTime="2026-02-18 00:51:20.334053336 +0000 UTC m=+1021.902066506" Feb 18 00:51:20 crc kubenswrapper[4791]: I0218 00:51:20.350181 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-67d9bcb746-9bq8h" podStartSLOduration=2.750390404 podStartE2EDuration="8.350150091s" podCreationTimestamp="2026-02-18 00:51:12 +0000 UTC" firstStartedPulling="2026-02-18 00:51:13.616958034 +0000 UTC m=+1015.184971204" lastFinishedPulling="2026-02-18 00:51:19.216717721 +0000 UTC m=+1020.784730891" observedRunningTime="2026-02-18 00:51:20.349475701 +0000 UTC m=+1021.917488871" watchObservedRunningTime="2026-02-18 00:51:20.350150091 +0000 UTC m=+1021.918163261" Feb 18 00:51:26 crc kubenswrapper[4791]: I0218 00:51:26.800332 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 00:51:26 crc kubenswrapper[4791]: I0218 00:51:26.800913 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 00:51:33 crc kubenswrapper[4791]: I0218 00:51:33.137769 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-67d9bcb746-9bq8h" Feb 18 00:51:52 crc kubenswrapper[4791]: I0218 00:51:52.907748 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-f9fdbc69b-szb26" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.708347 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-gkll6"] Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.713513 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-gkll6" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.716475 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.717389 4791 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.718436 4791 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-4hrsm" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.720406 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-78b44bf5bb-h4g9l"] Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.721830 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-78b44bf5bb-h4g9l" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.724061 4791 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.733439 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-78b44bf5bb-h4g9l"] Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.809020 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a5d9d585-69e8-4028-9939-a3c5e1f875fe-metrics-certs\") pod \"frr-k8s-gkll6\" (UID: \"a5d9d585-69e8-4028-9939-a3c5e1f875fe\") " pod="metallb-system/frr-k8s-gkll6" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.809081 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-flv6p\" (UniqueName: \"kubernetes.io/projected/799e6275-aff5-49be-9af2-3f4187055abc-kube-api-access-flv6p\") pod \"frr-k8s-webhook-server-78b44bf5bb-h4g9l\" (UID: \"799e6275-aff5-49be-9af2-3f4187055abc\") " pod="metallb-system/frr-k8s-webhook-server-78b44bf5bb-h4g9l" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.809104 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/a5d9d585-69e8-4028-9939-a3c5e1f875fe-frr-conf\") pod \"frr-k8s-gkll6\" (UID: \"a5d9d585-69e8-4028-9939-a3c5e1f875fe\") " pod="metallb-system/frr-k8s-gkll6" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.809299 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/a5d9d585-69e8-4028-9939-a3c5e1f875fe-reloader\") pod \"frr-k8s-gkll6\" (UID: \"a5d9d585-69e8-4028-9939-a3c5e1f875fe\") " pod="metallb-system/frr-k8s-gkll6" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.809366 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bxbbw\" (UniqueName: \"kubernetes.io/projected/a5d9d585-69e8-4028-9939-a3c5e1f875fe-kube-api-access-bxbbw\") pod \"frr-k8s-gkll6\" (UID: \"a5d9d585-69e8-4028-9939-a3c5e1f875fe\") " pod="metallb-system/frr-k8s-gkll6" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.809402 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/a5d9d585-69e8-4028-9939-a3c5e1f875fe-frr-startup\") pod \"frr-k8s-gkll6\" (UID: \"a5d9d585-69e8-4028-9939-a3c5e1f875fe\") " pod="metallb-system/frr-k8s-gkll6" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.809442 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/a5d9d585-69e8-4028-9939-a3c5e1f875fe-frr-sockets\") pod \"frr-k8s-gkll6\" (UID: \"a5d9d585-69e8-4028-9939-a3c5e1f875fe\") " pod="metallb-system/frr-k8s-gkll6" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.809545 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/a5d9d585-69e8-4028-9939-a3c5e1f875fe-metrics\") pod \"frr-k8s-gkll6\" (UID: \"a5d9d585-69e8-4028-9939-a3c5e1f875fe\") " pod="metallb-system/frr-k8s-gkll6" Feb 18 00:51:53 crc 
kubenswrapper[4791]: I0218 00:51:53.809585 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/799e6275-aff5-49be-9af2-3f4187055abc-cert\") pod \"frr-k8s-webhook-server-78b44bf5bb-h4g9l\" (UID: \"799e6275-aff5-49be-9af2-3f4187055abc\") " pod="metallb-system/frr-k8s-webhook-server-78b44bf5bb-h4g9l" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.819026 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-tc6ln"] Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.820208 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-tc6ln" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.822891 4791 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.823035 4791 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.823180 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.823297 4791 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-zmbbk" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.848181 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-69bbfbf88f-7c2t9"] Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.849406 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-69bbfbf88f-7c2t9" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.851396 4791 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.859179 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-69bbfbf88f-7c2t9"] Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.910236 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/a5d9d585-69e8-4028-9939-a3c5e1f875fe-reloader\") pod \"frr-k8s-gkll6\" (UID: \"a5d9d585-69e8-4028-9939-a3c5e1f875fe\") " pod="metallb-system/frr-k8s-gkll6" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.910504 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bxbbw\" (UniqueName: \"kubernetes.io/projected/a5d9d585-69e8-4028-9939-a3c5e1f875fe-kube-api-access-bxbbw\") pod \"frr-k8s-gkll6\" (UID: \"a5d9d585-69e8-4028-9939-a3c5e1f875fe\") " pod="metallb-system/frr-k8s-gkll6" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.910527 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/a5d9d585-69e8-4028-9939-a3c5e1f875fe-frr-startup\") pod \"frr-k8s-gkll6\" (UID: \"a5d9d585-69e8-4028-9939-a3c5e1f875fe\") " pod="metallb-system/frr-k8s-gkll6" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.910551 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/a5d9d585-69e8-4028-9939-a3c5e1f875fe-frr-sockets\") pod \"frr-k8s-gkll6\" (UID: \"a5d9d585-69e8-4028-9939-a3c5e1f875fe\") " pod="metallb-system/frr-k8s-gkll6" Feb 18 00:51:53 
crc kubenswrapper[4791]: I0218 00:51:53.910587 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/a5d9d585-69e8-4028-9939-a3c5e1f875fe-reloader\") pod \"frr-k8s-gkll6\" (UID: \"a5d9d585-69e8-4028-9939-a3c5e1f875fe\") " pod="metallb-system/frr-k8s-gkll6" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.910602 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/a5d9d585-69e8-4028-9939-a3c5e1f875fe-metrics\") pod \"frr-k8s-gkll6\" (UID: \"a5d9d585-69e8-4028-9939-a3c5e1f875fe\") " pod="metallb-system/frr-k8s-gkll6" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.910705 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/799e6275-aff5-49be-9af2-3f4187055abc-cert\") pod \"frr-k8s-webhook-server-78b44bf5bb-h4g9l\" (UID: \"799e6275-aff5-49be-9af2-3f4187055abc\") " pod="metallb-system/frr-k8s-webhook-server-78b44bf5bb-h4g9l" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.910743 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-smp6m\" (UniqueName: \"kubernetes.io/projected/2da8a059-b58c-4390-b09c-87158939e07f-kube-api-access-smp6m\") pod \"speaker-tc6ln\" (UID: \"2da8a059-b58c-4390-b09c-87158939e07f\") " pod="metallb-system/speaker-tc6ln" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.910761 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/2da8a059-b58c-4390-b09c-87158939e07f-metallb-excludel2\") pod \"speaker-tc6ln\" (UID: \"2da8a059-b58c-4390-b09c-87158939e07f\") " pod="metallb-system/speaker-tc6ln" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.910789 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2168cd4c-c18b-4d6e-be03-24a4749b9c66-cert\") pod \"controller-69bbfbf88f-7c2t9\" (UID: \"2168cd4c-c18b-4d6e-be03-24a4749b9c66\") " pod="metallb-system/controller-69bbfbf88f-7c2t9" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.910814 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/2da8a059-b58c-4390-b09c-87158939e07f-memberlist\") pod \"speaker-tc6ln\" (UID: \"2da8a059-b58c-4390-b09c-87158939e07f\") " pod="metallb-system/speaker-tc6ln" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.910857 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2da8a059-b58c-4390-b09c-87158939e07f-metrics-certs\") pod \"speaker-tc6ln\" (UID: \"2da8a059-b58c-4390-b09c-87158939e07f\") " pod="metallb-system/speaker-tc6ln" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.910882 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a5d9d585-69e8-4028-9939-a3c5e1f875fe-metrics-certs\") pod \"frr-k8s-gkll6\" (UID: \"a5d9d585-69e8-4028-9939-a3c5e1f875fe\") " pod="metallb-system/frr-k8s-gkll6" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.910900 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: 
\"kubernetes.io/secret/2168cd4c-c18b-4d6e-be03-24a4749b9c66-metrics-certs\") pod \"controller-69bbfbf88f-7c2t9\" (UID: \"2168cd4c-c18b-4d6e-be03-24a4749b9c66\") " pod="metallb-system/controller-69bbfbf88f-7c2t9" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.910971 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/a5d9d585-69e8-4028-9939-a3c5e1f875fe-frr-sockets\") pod \"frr-k8s-gkll6\" (UID: \"a5d9d585-69e8-4028-9939-a3c5e1f875fe\") " pod="metallb-system/frr-k8s-gkll6" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.911033 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7rd64\" (UniqueName: \"kubernetes.io/projected/2168cd4c-c18b-4d6e-be03-24a4749b9c66-kube-api-access-7rd64\") pod \"controller-69bbfbf88f-7c2t9\" (UID: \"2168cd4c-c18b-4d6e-be03-24a4749b9c66\") " pod="metallb-system/controller-69bbfbf88f-7c2t9" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.911093 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-flv6p\" (UniqueName: \"kubernetes.io/projected/799e6275-aff5-49be-9af2-3f4187055abc-kube-api-access-flv6p\") pod \"frr-k8s-webhook-server-78b44bf5bb-h4g9l\" (UID: \"799e6275-aff5-49be-9af2-3f4187055abc\") " pod="metallb-system/frr-k8s-webhook-server-78b44bf5bb-h4g9l" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.911115 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/a5d9d585-69e8-4028-9939-a3c5e1f875fe-frr-conf\") pod \"frr-k8s-gkll6\" (UID: \"a5d9d585-69e8-4028-9939-a3c5e1f875fe\") " pod="metallb-system/frr-k8s-gkll6" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.911411 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/a5d9d585-69e8-4028-9939-a3c5e1f875fe-frr-conf\") pod \"frr-k8s-gkll6\" (UID: \"a5d9d585-69e8-4028-9939-a3c5e1f875fe\") " pod="metallb-system/frr-k8s-gkll6" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.911610 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/a5d9d585-69e8-4028-9939-a3c5e1f875fe-metrics\") pod \"frr-k8s-gkll6\" (UID: \"a5d9d585-69e8-4028-9939-a3c5e1f875fe\") " pod="metallb-system/frr-k8s-gkll6" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.911615 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/a5d9d585-69e8-4028-9939-a3c5e1f875fe-frr-startup\") pod \"frr-k8s-gkll6\" (UID: \"a5d9d585-69e8-4028-9939-a3c5e1f875fe\") " pod="metallb-system/frr-k8s-gkll6" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.916387 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a5d9d585-69e8-4028-9939-a3c5e1f875fe-metrics-certs\") pod \"frr-k8s-gkll6\" (UID: \"a5d9d585-69e8-4028-9939-a3c5e1f875fe\") " pod="metallb-system/frr-k8s-gkll6" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.922823 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/799e6275-aff5-49be-9af2-3f4187055abc-cert\") pod \"frr-k8s-webhook-server-78b44bf5bb-h4g9l\" (UID: \"799e6275-aff5-49be-9af2-3f4187055abc\") " pod="metallb-system/frr-k8s-webhook-server-78b44bf5bb-h4g9l" Feb 18 
00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.928518 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-flv6p\" (UniqueName: \"kubernetes.io/projected/799e6275-aff5-49be-9af2-3f4187055abc-kube-api-access-flv6p\") pod \"frr-k8s-webhook-server-78b44bf5bb-h4g9l\" (UID: \"799e6275-aff5-49be-9af2-3f4187055abc\") " pod="metallb-system/frr-k8s-webhook-server-78b44bf5bb-h4g9l" Feb 18 00:51:53 crc kubenswrapper[4791]: I0218 00:51:53.933518 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bxbbw\" (UniqueName: \"kubernetes.io/projected/a5d9d585-69e8-4028-9939-a3c5e1f875fe-kube-api-access-bxbbw\") pod \"frr-k8s-gkll6\" (UID: \"a5d9d585-69e8-4028-9939-a3c5e1f875fe\") " pod="metallb-system/frr-k8s-gkll6" Feb 18 00:51:54 crc kubenswrapper[4791]: I0218 00:51:54.011759 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-smp6m\" (UniqueName: \"kubernetes.io/projected/2da8a059-b58c-4390-b09c-87158939e07f-kube-api-access-smp6m\") pod \"speaker-tc6ln\" (UID: \"2da8a059-b58c-4390-b09c-87158939e07f\") " pod="metallb-system/speaker-tc6ln" Feb 18 00:51:54 crc kubenswrapper[4791]: I0218 00:51:54.011798 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/2da8a059-b58c-4390-b09c-87158939e07f-metallb-excludel2\") pod \"speaker-tc6ln\" (UID: \"2da8a059-b58c-4390-b09c-87158939e07f\") " pod="metallb-system/speaker-tc6ln" Feb 18 00:51:54 crc kubenswrapper[4791]: I0218 00:51:54.011819 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2168cd4c-c18b-4d6e-be03-24a4749b9c66-cert\") pod \"controller-69bbfbf88f-7c2t9\" (UID: \"2168cd4c-c18b-4d6e-be03-24a4749b9c66\") " pod="metallb-system/controller-69bbfbf88f-7c2t9" Feb 18 00:51:54 crc kubenswrapper[4791]: I0218 00:51:54.011837 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/2da8a059-b58c-4390-b09c-87158939e07f-memberlist\") pod \"speaker-tc6ln\" (UID: \"2da8a059-b58c-4390-b09c-87158939e07f\") " pod="metallb-system/speaker-tc6ln" Feb 18 00:51:54 crc kubenswrapper[4791]: I0218 00:51:54.011861 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2da8a059-b58c-4390-b09c-87158939e07f-metrics-certs\") pod \"speaker-tc6ln\" (UID: \"2da8a059-b58c-4390-b09c-87158939e07f\") " pod="metallb-system/speaker-tc6ln" Feb 18 00:51:54 crc kubenswrapper[4791]: I0218 00:51:54.011879 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2168cd4c-c18b-4d6e-be03-24a4749b9c66-metrics-certs\") pod \"controller-69bbfbf88f-7c2t9\" (UID: \"2168cd4c-c18b-4d6e-be03-24a4749b9c66\") " pod="metallb-system/controller-69bbfbf88f-7c2t9" Feb 18 00:51:54 crc kubenswrapper[4791]: I0218 00:51:54.011896 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7rd64\" (UniqueName: \"kubernetes.io/projected/2168cd4c-c18b-4d6e-be03-24a4749b9c66-kube-api-access-7rd64\") pod \"controller-69bbfbf88f-7c2t9\" (UID: \"2168cd4c-c18b-4d6e-be03-24a4749b9c66\") " pod="metallb-system/controller-69bbfbf88f-7c2t9" Feb 18 00:51:54 crc kubenswrapper[4791]: E0218 00:51:54.012004 4791 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret 
"metallb-memberlist" not found Feb 18 00:51:54 crc kubenswrapper[4791]: E0218 00:51:54.012080 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2da8a059-b58c-4390-b09c-87158939e07f-memberlist podName:2da8a059-b58c-4390-b09c-87158939e07f nodeName:}" failed. No retries permitted until 2026-02-18 00:51:54.512063149 +0000 UTC m=+1056.080076319 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/2da8a059-b58c-4390-b09c-87158939e07f-memberlist") pod "speaker-tc6ln" (UID: "2da8a059-b58c-4390-b09c-87158939e07f") : secret "metallb-memberlist" not found Feb 18 00:51:54 crc kubenswrapper[4791]: I0218 00:51:54.012593 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/2da8a059-b58c-4390-b09c-87158939e07f-metallb-excludel2\") pod \"speaker-tc6ln\" (UID: \"2da8a059-b58c-4390-b09c-87158939e07f\") " pod="metallb-system/speaker-tc6ln" Feb 18 00:51:54 crc kubenswrapper[4791]: I0218 00:51:54.013997 4791 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Feb 18 00:51:54 crc kubenswrapper[4791]: I0218 00:51:54.027110 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2168cd4c-c18b-4d6e-be03-24a4749b9c66-metrics-certs\") pod \"controller-69bbfbf88f-7c2t9\" (UID: \"2168cd4c-c18b-4d6e-be03-24a4749b9c66\") " pod="metallb-system/controller-69bbfbf88f-7c2t9" Feb 18 00:51:54 crc kubenswrapper[4791]: I0218 00:51:54.027230 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2da8a059-b58c-4390-b09c-87158939e07f-metrics-certs\") pod \"speaker-tc6ln\" (UID: \"2da8a059-b58c-4390-b09c-87158939e07f\") " pod="metallb-system/speaker-tc6ln" Feb 18 00:51:54 crc kubenswrapper[4791]: I0218 00:51:54.028866 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2168cd4c-c18b-4d6e-be03-24a4749b9c66-cert\") pod \"controller-69bbfbf88f-7c2t9\" (UID: \"2168cd4c-c18b-4d6e-be03-24a4749b9c66\") " pod="metallb-system/controller-69bbfbf88f-7c2t9" Feb 18 00:51:54 crc kubenswrapper[4791]: I0218 00:51:54.029525 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-smp6m\" (UniqueName: \"kubernetes.io/projected/2da8a059-b58c-4390-b09c-87158939e07f-kube-api-access-smp6m\") pod \"speaker-tc6ln\" (UID: \"2da8a059-b58c-4390-b09c-87158939e07f\") " pod="metallb-system/speaker-tc6ln" Feb 18 00:51:54 crc kubenswrapper[4791]: I0218 00:51:54.030876 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7rd64\" (UniqueName: \"kubernetes.io/projected/2168cd4c-c18b-4d6e-be03-24a4749b9c66-kube-api-access-7rd64\") pod \"controller-69bbfbf88f-7c2t9\" (UID: \"2168cd4c-c18b-4d6e-be03-24a4749b9c66\") " pod="metallb-system/controller-69bbfbf88f-7c2t9" Feb 18 00:51:54 crc kubenswrapper[4791]: I0218 00:51:54.036683 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-gkll6" Feb 18 00:51:54 crc kubenswrapper[4791]: I0218 00:51:54.050656 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-78b44bf5bb-h4g9l" Feb 18 00:51:54 crc kubenswrapper[4791]: I0218 00:51:54.167920 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-69bbfbf88f-7c2t9" Feb 18 00:51:54 crc kubenswrapper[4791]: I0218 00:51:54.518084 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/2da8a059-b58c-4390-b09c-87158939e07f-memberlist\") pod \"speaker-tc6ln\" (UID: \"2da8a059-b58c-4390-b09c-87158939e07f\") " pod="metallb-system/speaker-tc6ln" Feb 18 00:51:54 crc kubenswrapper[4791]: E0218 00:51:54.518235 4791 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Feb 18 00:51:54 crc kubenswrapper[4791]: E0218 00:51:54.518288 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2da8a059-b58c-4390-b09c-87158939e07f-memberlist podName:2da8a059-b58c-4390-b09c-87158939e07f nodeName:}" failed. No retries permitted until 2026-02-18 00:51:55.518274877 +0000 UTC m=+1057.086288047 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/2da8a059-b58c-4390-b09c-87158939e07f-memberlist") pod "speaker-tc6ln" (UID: "2da8a059-b58c-4390-b09c-87158939e07f") : secret "metallb-memberlist" not found Feb 18 00:51:54 crc kubenswrapper[4791]: I0218 00:51:54.522492 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-78b44bf5bb-h4g9l"] Feb 18 00:51:54 crc kubenswrapper[4791]: W0218 00:51:54.522613 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod799e6275_aff5_49be_9af2_3f4187055abc.slice/crio-811e71dec9bbc795d15db3d738974f46da6d815adcbbf5549fa05a48cd138f55 WatchSource:0}: Error finding container 811e71dec9bbc795d15db3d738974f46da6d815adcbbf5549fa05a48cd138f55: Status 404 returned error can't find the container with id 811e71dec9bbc795d15db3d738974f46da6d815adcbbf5549fa05a48cd138f55 Feb 18 00:51:54 crc kubenswrapper[4791]: I0218 00:51:54.569507 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-78b44bf5bb-h4g9l" event={"ID":"799e6275-aff5-49be-9af2-3f4187055abc","Type":"ContainerStarted","Data":"811e71dec9bbc795d15db3d738974f46da6d815adcbbf5549fa05a48cd138f55"} Feb 18 00:51:54 crc kubenswrapper[4791]: I0218 00:51:54.570561 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-gkll6" event={"ID":"a5d9d585-69e8-4028-9939-a3c5e1f875fe","Type":"ContainerStarted","Data":"569e4b4e4504e497fc67fac7d759182d847b14c8ba31e139b8ec8f4d7557e560"} Feb 18 00:51:54 crc kubenswrapper[4791]: I0218 00:51:54.643843 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-69bbfbf88f-7c2t9"] Feb 18 00:51:55 crc kubenswrapper[4791]: I0218 00:51:55.534326 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/2da8a059-b58c-4390-b09c-87158939e07f-memberlist\") pod \"speaker-tc6ln\" (UID: \"2da8a059-b58c-4390-b09c-87158939e07f\") " pod="metallb-system/speaker-tc6ln" Feb 18 00:51:55 crc kubenswrapper[4791]: I0218 00:51:55.547871 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/2da8a059-b58c-4390-b09c-87158939e07f-memberlist\") pod \"speaker-tc6ln\" (UID: \"2da8a059-b58c-4390-b09c-87158939e07f\") " pod="metallb-system/speaker-tc6ln" Feb 18 00:51:55 crc kubenswrapper[4791]: I0218 00:51:55.582233 4791 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="metallb-system/controller-69bbfbf88f-7c2t9" event={"ID":"2168cd4c-c18b-4d6e-be03-24a4749b9c66","Type":"ContainerStarted","Data":"60492b0c58f3530c0fd60624dba233eef3e39c94b461760ec4bc54ed8adf80fe"} Feb 18 00:51:55 crc kubenswrapper[4791]: I0218 00:51:55.582282 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-69bbfbf88f-7c2t9" event={"ID":"2168cd4c-c18b-4d6e-be03-24a4749b9c66","Type":"ContainerStarted","Data":"7df71ca7eb173e1e2b04c893bf4337302e61205007cae22129779c3f6869e0e7"} Feb 18 00:51:55 crc kubenswrapper[4791]: I0218 00:51:55.582292 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-69bbfbf88f-7c2t9" event={"ID":"2168cd4c-c18b-4d6e-be03-24a4749b9c66","Type":"ContainerStarted","Data":"1313afc584daf26bc1e89eac8b61e1ec6582ba2e8d2e41304bcf863e79e834a9"} Feb 18 00:51:55 crc kubenswrapper[4791]: I0218 00:51:55.582515 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-69bbfbf88f-7c2t9" Feb 18 00:51:55 crc kubenswrapper[4791]: I0218 00:51:55.604920 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-69bbfbf88f-7c2t9" podStartSLOduration=2.604900421 podStartE2EDuration="2.604900421s" podCreationTimestamp="2026-02-18 00:51:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:51:55.597879165 +0000 UTC m=+1057.165892335" watchObservedRunningTime="2026-02-18 00:51:55.604900421 +0000 UTC m=+1057.172913591" Feb 18 00:51:55 crc kubenswrapper[4791]: I0218 00:51:55.639094 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-tc6ln" Feb 18 00:51:56 crc kubenswrapper[4791]: I0218 00:51:56.595420 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-tc6ln" event={"ID":"2da8a059-b58c-4390-b09c-87158939e07f","Type":"ContainerStarted","Data":"b40b62c68b0fc7dd2f8d55b9578a29630814f2304eee2e9d7252ede6c032390f"} Feb 18 00:51:56 crc kubenswrapper[4791]: I0218 00:51:56.595692 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-tc6ln" event={"ID":"2da8a059-b58c-4390-b09c-87158939e07f","Type":"ContainerStarted","Data":"2d47ea1a0a901b58e3088effc42606de6eb9f1c980f98120ea294fc27cbaf606"} Feb 18 00:51:56 crc kubenswrapper[4791]: I0218 00:51:56.595704 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-tc6ln" event={"ID":"2da8a059-b58c-4390-b09c-87158939e07f","Type":"ContainerStarted","Data":"25307f3ec7e842fe5a6c49c6c683a64b4b9aa1af1cb43f4798189db74c43ee2b"} Feb 18 00:51:56 crc kubenswrapper[4791]: I0218 00:51:56.596125 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-tc6ln" Feb 18 00:51:56 crc kubenswrapper[4791]: I0218 00:51:56.613776 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-tc6ln" podStartSLOduration=3.613758884 podStartE2EDuration="3.613758884s" podCreationTimestamp="2026-02-18 00:51:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:51:56.612019421 +0000 UTC m=+1058.180032591" watchObservedRunningTime="2026-02-18 00:51:56.613758884 +0000 UTC m=+1058.181772044" Feb 18 00:51:56 crc kubenswrapper[4791]: I0218 00:51:56.799610 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv 
container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 00:51:56 crc kubenswrapper[4791]: I0218 00:51:56.799680 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 00:51:56 crc kubenswrapper[4791]: I0218 00:51:56.799722 4791 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" Feb 18 00:51:56 crc kubenswrapper[4791]: I0218 00:51:56.800414 4791 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"35a1be9cfcdfb0c3c05b26f9c95806278509c3206e9bc4177d7fa8a8a51ad178"} pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 18 00:51:56 crc kubenswrapper[4791]: I0218 00:51:56.800475 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" containerID="cri-o://35a1be9cfcdfb0c3c05b26f9c95806278509c3206e9bc4177d7fa8a8a51ad178" gracePeriod=600 Feb 18 00:51:57 crc kubenswrapper[4791]: I0218 00:51:57.612501 4791 generic.go:334] "Generic (PLEG): container finished" podID="0b31a333-8f95-459c-8135-e91e557c4c85" containerID="35a1be9cfcdfb0c3c05b26f9c95806278509c3206e9bc4177d7fa8a8a51ad178" exitCode=0 Feb 18 00:51:57 crc kubenswrapper[4791]: I0218 00:51:57.612548 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerDied","Data":"35a1be9cfcdfb0c3c05b26f9c95806278509c3206e9bc4177d7fa8a8a51ad178"} Feb 18 00:51:57 crc kubenswrapper[4791]: I0218 00:51:57.613002 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerStarted","Data":"c9007556fe5d18b5a46e46ece707f7a5089104141aff41585216532707c5ee80"} Feb 18 00:51:57 crc kubenswrapper[4791]: I0218 00:51:57.613019 4791 scope.go:117] "RemoveContainer" containerID="321face9bfc6a821bbf92e76b694e43c887bc9e42886417d5315477f36b19cca" Feb 18 00:52:02 crc kubenswrapper[4791]: I0218 00:52:02.649269 4791 generic.go:334] "Generic (PLEG): container finished" podID="a5d9d585-69e8-4028-9939-a3c5e1f875fe" containerID="6ef77c523786a7f3bece6b59c3efb9c8f83bfc6640b66ededc69754e8c0aca60" exitCode=0 Feb 18 00:52:02 crc kubenswrapper[4791]: I0218 00:52:02.649525 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-gkll6" event={"ID":"a5d9d585-69e8-4028-9939-a3c5e1f875fe","Type":"ContainerDied","Data":"6ef77c523786a7f3bece6b59c3efb9c8f83bfc6640b66ededc69754e8c0aca60"} Feb 18 00:52:02 crc kubenswrapper[4791]: I0218 00:52:02.651394 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-78b44bf5bb-h4g9l" 
event={"ID":"799e6275-aff5-49be-9af2-3f4187055abc","Type":"ContainerStarted","Data":"8e8e48700c9c4df5f55c36b24f9c613c451dd34f95facfe8c0a9040644cb543e"} Feb 18 00:52:02 crc kubenswrapper[4791]: I0218 00:52:02.651790 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-78b44bf5bb-h4g9l" Feb 18 00:52:03 crc kubenswrapper[4791]: I0218 00:52:03.662811 4791 generic.go:334] "Generic (PLEG): container finished" podID="a5d9d585-69e8-4028-9939-a3c5e1f875fe" containerID="af5f71477a51d50d52d6150cb750c6e8ad0f458de826d782e68023e43a5ee5c5" exitCode=0 Feb 18 00:52:03 crc kubenswrapper[4791]: I0218 00:52:03.662900 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-gkll6" event={"ID":"a5d9d585-69e8-4028-9939-a3c5e1f875fe","Type":"ContainerDied","Data":"af5f71477a51d50d52d6150cb750c6e8ad0f458de826d782e68023e43a5ee5c5"} Feb 18 00:52:03 crc kubenswrapper[4791]: I0218 00:52:03.700066 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-78b44bf5bb-h4g9l" podStartSLOduration=3.713144259 podStartE2EDuration="10.700040178s" podCreationTimestamp="2026-02-18 00:51:53 +0000 UTC" firstStartedPulling="2026-02-18 00:51:54.525089406 +0000 UTC m=+1056.093102576" lastFinishedPulling="2026-02-18 00:52:01.511985325 +0000 UTC m=+1063.079998495" observedRunningTime="2026-02-18 00:52:02.691485633 +0000 UTC m=+1064.259498833" watchObservedRunningTime="2026-02-18 00:52:03.700040178 +0000 UTC m=+1065.268053378" Feb 18 00:52:04 crc kubenswrapper[4791]: I0218 00:52:04.173177 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-69bbfbf88f-7c2t9" Feb 18 00:52:04 crc kubenswrapper[4791]: I0218 00:52:04.675456 4791 generic.go:334] "Generic (PLEG): container finished" podID="a5d9d585-69e8-4028-9939-a3c5e1f875fe" containerID="7860036e8fbc64c84757179b94ba0a11b24bbbdb6b39d865168c94f50c462cc9" exitCode=0 Feb 18 00:52:04 crc kubenswrapper[4791]: I0218 00:52:04.675523 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-gkll6" event={"ID":"a5d9d585-69e8-4028-9939-a3c5e1f875fe","Type":"ContainerDied","Data":"7860036e8fbc64c84757179b94ba0a11b24bbbdb6b39d865168c94f50c462cc9"} Feb 18 00:52:05 crc kubenswrapper[4791]: I0218 00:52:05.643113 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-tc6ln" Feb 18 00:52:05 crc kubenswrapper[4791]: I0218 00:52:05.689234 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-gkll6" event={"ID":"a5d9d585-69e8-4028-9939-a3c5e1f875fe","Type":"ContainerStarted","Data":"fcd03d8775e5f168c5e0aaa34fa7b8ebae0c0d689ea342da037f32911c306cd7"} Feb 18 00:52:05 crc kubenswrapper[4791]: I0218 00:52:05.689276 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-gkll6" event={"ID":"a5d9d585-69e8-4028-9939-a3c5e1f875fe","Type":"ContainerStarted","Data":"0531325bd8fc7282f8c8907c1fe1259d5842420ac009a32abeaaa0283b522a97"} Feb 18 00:52:05 crc kubenswrapper[4791]: I0218 00:52:05.689286 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-gkll6" event={"ID":"a5d9d585-69e8-4028-9939-a3c5e1f875fe","Type":"ContainerStarted","Data":"95e86a24b8acffca850c8edb4cd9d2b6b898d1719ef9426601f28c3349a5b242"} Feb 18 00:52:05 crc kubenswrapper[4791]: I0218 00:52:05.689294 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-gkll6" 
event={"ID":"a5d9d585-69e8-4028-9939-a3c5e1f875fe","Type":"ContainerStarted","Data":"6564b1261840f1b4f9b3081db61837d4db11272529585b04638359bc44434ccd"} Feb 18 00:52:05 crc kubenswrapper[4791]: I0218 00:52:05.689303 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-gkll6" event={"ID":"a5d9d585-69e8-4028-9939-a3c5e1f875fe","Type":"ContainerStarted","Data":"5376872f0167fe8af1bff69c7e281369b54da76ffbc515ef007e3d82d601f4c2"} Feb 18 00:52:06 crc kubenswrapper[4791]: I0218 00:52:06.699683 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-gkll6" event={"ID":"a5d9d585-69e8-4028-9939-a3c5e1f875fe","Type":"ContainerStarted","Data":"543a4702d2c44700000dde53078e01315c272c6ed4f438165738abd6f0be83b4"} Feb 18 00:52:06 crc kubenswrapper[4791]: I0218 00:52:06.700052 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-gkll6" Feb 18 00:52:06 crc kubenswrapper[4791]: I0218 00:52:06.728481 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-gkll6" podStartSLOduration=6.461061785 podStartE2EDuration="13.728458665s" podCreationTimestamp="2026-02-18 00:51:53 +0000 UTC" firstStartedPulling="2026-02-18 00:51:54.227651915 +0000 UTC m=+1055.795665075" lastFinishedPulling="2026-02-18 00:52:01.495048775 +0000 UTC m=+1063.063061955" observedRunningTime="2026-02-18 00:52:06.724833275 +0000 UTC m=+1068.292846445" watchObservedRunningTime="2026-02-18 00:52:06.728458665 +0000 UTC m=+1068.296471875" Feb 18 00:52:08 crc kubenswrapper[4791]: I0218 00:52:08.260260 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-m2mfs"] Feb 18 00:52:08 crc kubenswrapper[4791]: I0218 00:52:08.261501 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-m2mfs" Feb 18 00:52:08 crc kubenswrapper[4791]: I0218 00:52:08.263632 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-rp9tt" Feb 18 00:52:08 crc kubenswrapper[4791]: I0218 00:52:08.264334 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Feb 18 00:52:08 crc kubenswrapper[4791]: I0218 00:52:08.267449 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Feb 18 00:52:08 crc kubenswrapper[4791]: I0218 00:52:08.276942 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-m2mfs"] Feb 18 00:52:08 crc kubenswrapper[4791]: I0218 00:52:08.375440 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c4twv\" (UniqueName: \"kubernetes.io/projected/d7fd0ba0-b7f5-4945-a7a7-71d5ee722624-kube-api-access-c4twv\") pod \"openstack-operator-index-m2mfs\" (UID: \"d7fd0ba0-b7f5-4945-a7a7-71d5ee722624\") " pod="openstack-operators/openstack-operator-index-m2mfs" Feb 18 00:52:08 crc kubenswrapper[4791]: I0218 00:52:08.477041 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c4twv\" (UniqueName: \"kubernetes.io/projected/d7fd0ba0-b7f5-4945-a7a7-71d5ee722624-kube-api-access-c4twv\") pod \"openstack-operator-index-m2mfs\" (UID: \"d7fd0ba0-b7f5-4945-a7a7-71d5ee722624\") " pod="openstack-operators/openstack-operator-index-m2mfs" Feb 18 00:52:08 crc kubenswrapper[4791]: I0218 00:52:08.510231 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c4twv\" (UniqueName: \"kubernetes.io/projected/d7fd0ba0-b7f5-4945-a7a7-71d5ee722624-kube-api-access-c4twv\") pod \"openstack-operator-index-m2mfs\" (UID: \"d7fd0ba0-b7f5-4945-a7a7-71d5ee722624\") " pod="openstack-operators/openstack-operator-index-m2mfs" Feb 18 00:52:08 crc kubenswrapper[4791]: I0218 00:52:08.604553 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-m2mfs" Feb 18 00:52:09 crc kubenswrapper[4791]: I0218 00:52:09.038017 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-gkll6" Feb 18 00:52:09 crc kubenswrapper[4791]: I0218 00:52:09.074044 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-m2mfs"] Feb 18 00:52:09 crc kubenswrapper[4791]: W0218 00:52:09.074966 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd7fd0ba0_b7f5_4945_a7a7_71d5ee722624.slice/crio-2346fe98021d31518f87273fe9195b1805f2a837de8dde869730f674e8657a49 WatchSource:0}: Error finding container 2346fe98021d31518f87273fe9195b1805f2a837de8dde869730f674e8657a49: Status 404 returned error can't find the container with id 2346fe98021d31518f87273fe9195b1805f2a837de8dde869730f674e8657a49 Feb 18 00:52:09 crc kubenswrapper[4791]: I0218 00:52:09.085616 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-gkll6" Feb 18 00:52:09 crc kubenswrapper[4791]: I0218 00:52:09.771305 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-m2mfs" event={"ID":"d7fd0ba0-b7f5-4945-a7a7-71d5ee722624","Type":"ContainerStarted","Data":"2346fe98021d31518f87273fe9195b1805f2a837de8dde869730f674e8657a49"} Feb 18 00:52:11 crc kubenswrapper[4791]: I0218 00:52:11.601418 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-m2mfs"] Feb 18 00:52:11 crc kubenswrapper[4791]: I0218 00:52:11.786400 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-m2mfs" event={"ID":"d7fd0ba0-b7f5-4945-a7a7-71d5ee722624","Type":"ContainerStarted","Data":"289910a5977e52d2f6597600ed83000e0e0cbc10eae6352243cc638ed328aa91"} Feb 18 00:52:11 crc kubenswrapper[4791]: I0218 00:52:11.804325 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-m2mfs" podStartSLOduration=1.510909665 podStartE2EDuration="3.804307684s" podCreationTimestamp="2026-02-18 00:52:08 +0000 UTC" firstStartedPulling="2026-02-18 00:52:09.078675572 +0000 UTC m=+1070.646688752" lastFinishedPulling="2026-02-18 00:52:11.372073601 +0000 UTC m=+1072.940086771" observedRunningTime="2026-02-18 00:52:11.803720896 +0000 UTC m=+1073.371734086" watchObservedRunningTime="2026-02-18 00:52:11.804307684 +0000 UTC m=+1073.372320864" Feb 18 00:52:12 crc kubenswrapper[4791]: I0218 00:52:12.203502 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-hccl8"] Feb 18 00:52:12 crc kubenswrapper[4791]: I0218 00:52:12.204455 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-hccl8" Feb 18 00:52:12 crc kubenswrapper[4791]: I0218 00:52:12.218966 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-hccl8"] Feb 18 00:52:12 crc kubenswrapper[4791]: I0218 00:52:12.366688 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bgl5c\" (UniqueName: \"kubernetes.io/projected/38a12dc2-60bc-48f8-9597-b5e899ab2971-kube-api-access-bgl5c\") pod \"openstack-operator-index-hccl8\" (UID: \"38a12dc2-60bc-48f8-9597-b5e899ab2971\") " pod="openstack-operators/openstack-operator-index-hccl8" Feb 18 00:52:12 crc kubenswrapper[4791]: I0218 00:52:12.468875 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bgl5c\" (UniqueName: \"kubernetes.io/projected/38a12dc2-60bc-48f8-9597-b5e899ab2971-kube-api-access-bgl5c\") pod \"openstack-operator-index-hccl8\" (UID: \"38a12dc2-60bc-48f8-9597-b5e899ab2971\") " pod="openstack-operators/openstack-operator-index-hccl8" Feb 18 00:52:12 crc kubenswrapper[4791]: I0218 00:52:12.492130 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bgl5c\" (UniqueName: \"kubernetes.io/projected/38a12dc2-60bc-48f8-9597-b5e899ab2971-kube-api-access-bgl5c\") pod \"openstack-operator-index-hccl8\" (UID: \"38a12dc2-60bc-48f8-9597-b5e899ab2971\") " pod="openstack-operators/openstack-operator-index-hccl8" Feb 18 00:52:12 crc kubenswrapper[4791]: I0218 00:52:12.520300 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-hccl8" Feb 18 00:52:12 crc kubenswrapper[4791]: I0218 00:52:12.796375 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-m2mfs" podUID="d7fd0ba0-b7f5-4945-a7a7-71d5ee722624" containerName="registry-server" containerID="cri-o://289910a5977e52d2f6597600ed83000e0e0cbc10eae6352243cc638ed328aa91" gracePeriod=2 Feb 18 00:52:13 crc kubenswrapper[4791]: W0218 00:52:13.087758 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod38a12dc2_60bc_48f8_9597_b5e899ab2971.slice/crio-0c8277809afbc263df8b25c091c2448b135b8629d61c36674e0ba85217a092b1 WatchSource:0}: Error finding container 0c8277809afbc263df8b25c091c2448b135b8629d61c36674e0ba85217a092b1: Status 404 returned error can't find the container with id 0c8277809afbc263df8b25c091c2448b135b8629d61c36674e0ba85217a092b1 Feb 18 00:52:13 crc kubenswrapper[4791]: I0218 00:52:13.088121 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-hccl8"] Feb 18 00:52:13 crc kubenswrapper[4791]: I0218 00:52:13.156070 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-m2mfs" Feb 18 00:52:13 crc kubenswrapper[4791]: I0218 00:52:13.298811 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c4twv\" (UniqueName: \"kubernetes.io/projected/d7fd0ba0-b7f5-4945-a7a7-71d5ee722624-kube-api-access-c4twv\") pod \"d7fd0ba0-b7f5-4945-a7a7-71d5ee722624\" (UID: \"d7fd0ba0-b7f5-4945-a7a7-71d5ee722624\") " Feb 18 00:52:13 crc kubenswrapper[4791]: I0218 00:52:13.303736 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d7fd0ba0-b7f5-4945-a7a7-71d5ee722624-kube-api-access-c4twv" (OuterVolumeSpecName: "kube-api-access-c4twv") pod "d7fd0ba0-b7f5-4945-a7a7-71d5ee722624" (UID: "d7fd0ba0-b7f5-4945-a7a7-71d5ee722624"). InnerVolumeSpecName "kube-api-access-c4twv". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:52:13 crc kubenswrapper[4791]: I0218 00:52:13.400883 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c4twv\" (UniqueName: \"kubernetes.io/projected/d7fd0ba0-b7f5-4945-a7a7-71d5ee722624-kube-api-access-c4twv\") on node \"crc\" DevicePath \"\"" Feb 18 00:52:13 crc kubenswrapper[4791]: I0218 00:52:13.806883 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-hccl8" event={"ID":"38a12dc2-60bc-48f8-9597-b5e899ab2971","Type":"ContainerStarted","Data":"745a79487355aedb1bd3c4ed0f8ec9c66c63015ada2d6066c7e5093ddd5e1e4b"} Feb 18 00:52:13 crc kubenswrapper[4791]: I0218 00:52:13.807138 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-hccl8" event={"ID":"38a12dc2-60bc-48f8-9597-b5e899ab2971","Type":"ContainerStarted","Data":"0c8277809afbc263df8b25c091c2448b135b8629d61c36674e0ba85217a092b1"} Feb 18 00:52:13 crc kubenswrapper[4791]: I0218 00:52:13.809111 4791 generic.go:334] "Generic (PLEG): container finished" podID="d7fd0ba0-b7f5-4945-a7a7-71d5ee722624" containerID="289910a5977e52d2f6597600ed83000e0e0cbc10eae6352243cc638ed328aa91" exitCode=0 Feb 18 00:52:13 crc kubenswrapper[4791]: I0218 00:52:13.809147 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-m2mfs" event={"ID":"d7fd0ba0-b7f5-4945-a7a7-71d5ee722624","Type":"ContainerDied","Data":"289910a5977e52d2f6597600ed83000e0e0cbc10eae6352243cc638ed328aa91"} Feb 18 00:52:13 crc kubenswrapper[4791]: I0218 00:52:13.809195 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-m2mfs" event={"ID":"d7fd0ba0-b7f5-4945-a7a7-71d5ee722624","Type":"ContainerDied","Data":"2346fe98021d31518f87273fe9195b1805f2a837de8dde869730f674e8657a49"} Feb 18 00:52:13 crc kubenswrapper[4791]: I0218 00:52:13.809215 4791 scope.go:117] "RemoveContainer" containerID="289910a5977e52d2f6597600ed83000e0e0cbc10eae6352243cc638ed328aa91" Feb 18 00:52:13 crc kubenswrapper[4791]: I0218 00:52:13.809454 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-m2mfs" Feb 18 00:52:13 crc kubenswrapper[4791]: I0218 00:52:13.828065 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-hccl8" podStartSLOduration=1.776696789 podStartE2EDuration="1.828038196s" podCreationTimestamp="2026-02-18 00:52:12 +0000 UTC" firstStartedPulling="2026-02-18 00:52:13.093285927 +0000 UTC m=+1074.661299097" lastFinishedPulling="2026-02-18 00:52:13.144627294 +0000 UTC m=+1074.712640504" observedRunningTime="2026-02-18 00:52:13.821705422 +0000 UTC m=+1075.389718592" watchObservedRunningTime="2026-02-18 00:52:13.828038196 +0000 UTC m=+1075.396051376" Feb 18 00:52:13 crc kubenswrapper[4791]: I0218 00:52:13.830026 4791 scope.go:117] "RemoveContainer" containerID="289910a5977e52d2f6597600ed83000e0e0cbc10eae6352243cc638ed328aa91" Feb 18 00:52:13 crc kubenswrapper[4791]: E0218 00:52:13.830491 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"289910a5977e52d2f6597600ed83000e0e0cbc10eae6352243cc638ed328aa91\": container with ID starting with 289910a5977e52d2f6597600ed83000e0e0cbc10eae6352243cc638ed328aa91 not found: ID does not exist" containerID="289910a5977e52d2f6597600ed83000e0e0cbc10eae6352243cc638ed328aa91" Feb 18 00:52:13 crc kubenswrapper[4791]: I0218 00:52:13.830558 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"289910a5977e52d2f6597600ed83000e0e0cbc10eae6352243cc638ed328aa91"} err="failed to get container status \"289910a5977e52d2f6597600ed83000e0e0cbc10eae6352243cc638ed328aa91\": rpc error: code = NotFound desc = could not find container \"289910a5977e52d2f6597600ed83000e0e0cbc10eae6352243cc638ed328aa91\": container with ID starting with 289910a5977e52d2f6597600ed83000e0e0cbc10eae6352243cc638ed328aa91 not found: ID does not exist" Feb 18 00:52:13 crc kubenswrapper[4791]: I0218 00:52:13.857531 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-m2mfs"] Feb 18 00:52:13 crc kubenswrapper[4791]: I0218 00:52:13.864688 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-m2mfs"] Feb 18 00:52:14 crc kubenswrapper[4791]: I0218 00:52:14.040341 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-gkll6" Feb 18 00:52:14 crc kubenswrapper[4791]: I0218 00:52:14.055566 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-78b44bf5bb-h4g9l" Feb 18 00:52:15 crc kubenswrapper[4791]: I0218 00:52:15.072742 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d7fd0ba0-b7f5-4945-a7a7-71d5ee722624" path="/var/lib/kubelet/pods/d7fd0ba0-b7f5-4945-a7a7-71d5ee722624/volumes" Feb 18 00:52:22 crc kubenswrapper[4791]: I0218 00:52:22.521762 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-hccl8" Feb 18 00:52:22 crc kubenswrapper[4791]: I0218 00:52:22.523640 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-hccl8" Feb 18 00:52:22 crc kubenswrapper[4791]: I0218 00:52:22.569979 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-hccl8" Feb 18 00:52:22 crc kubenswrapper[4791]: I0218 
00:52:22.936225 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-hccl8" Feb 18 00:52:29 crc kubenswrapper[4791]: I0218 00:52:29.639530 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/839821d02b67fa352b5f2f2742cf71374a58067197cd468c715f3fd4e7c76qd"] Feb 18 00:52:29 crc kubenswrapper[4791]: E0218 00:52:29.640248 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7fd0ba0-b7f5-4945-a7a7-71d5ee722624" containerName="registry-server" Feb 18 00:52:29 crc kubenswrapper[4791]: I0218 00:52:29.640261 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7fd0ba0-b7f5-4945-a7a7-71d5ee722624" containerName="registry-server" Feb 18 00:52:29 crc kubenswrapper[4791]: I0218 00:52:29.640418 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="d7fd0ba0-b7f5-4945-a7a7-71d5ee722624" containerName="registry-server" Feb 18 00:52:29 crc kubenswrapper[4791]: I0218 00:52:29.641411 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/839821d02b67fa352b5f2f2742cf71374a58067197cd468c715f3fd4e7c76qd" Feb 18 00:52:29 crc kubenswrapper[4791]: I0218 00:52:29.651225 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-8g8j2" Feb 18 00:52:29 crc kubenswrapper[4791]: I0218 00:52:29.657331 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/839821d02b67fa352b5f2f2742cf71374a58067197cd468c715f3fd4e7c76qd"] Feb 18 00:52:29 crc kubenswrapper[4791]: I0218 00:52:29.713865 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b8c638bd-8bce-4fc7-9289-312eee3c2be4-util\") pod \"839821d02b67fa352b5f2f2742cf71374a58067197cd468c715f3fd4e7c76qd\" (UID: \"b8c638bd-8bce-4fc7-9289-312eee3c2be4\") " pod="openstack-operators/839821d02b67fa352b5f2f2742cf71374a58067197cd468c715f3fd4e7c76qd" Feb 18 00:52:29 crc kubenswrapper[4791]: I0218 00:52:29.714021 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b8c638bd-8bce-4fc7-9289-312eee3c2be4-bundle\") pod \"839821d02b67fa352b5f2f2742cf71374a58067197cd468c715f3fd4e7c76qd\" (UID: \"b8c638bd-8bce-4fc7-9289-312eee3c2be4\") " pod="openstack-operators/839821d02b67fa352b5f2f2742cf71374a58067197cd468c715f3fd4e7c76qd" Feb 18 00:52:29 crc kubenswrapper[4791]: I0218 00:52:29.714054 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cj7sl\" (UniqueName: \"kubernetes.io/projected/b8c638bd-8bce-4fc7-9289-312eee3c2be4-kube-api-access-cj7sl\") pod \"839821d02b67fa352b5f2f2742cf71374a58067197cd468c715f3fd4e7c76qd\" (UID: \"b8c638bd-8bce-4fc7-9289-312eee3c2be4\") " pod="openstack-operators/839821d02b67fa352b5f2f2742cf71374a58067197cd468c715f3fd4e7c76qd" Feb 18 00:52:29 crc kubenswrapper[4791]: I0218 00:52:29.815456 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b8c638bd-8bce-4fc7-9289-312eee3c2be4-util\") pod \"839821d02b67fa352b5f2f2742cf71374a58067197cd468c715f3fd4e7c76qd\" (UID: \"b8c638bd-8bce-4fc7-9289-312eee3c2be4\") " pod="openstack-operators/839821d02b67fa352b5f2f2742cf71374a58067197cd468c715f3fd4e7c76qd" Feb 18 00:52:29 crc kubenswrapper[4791]: I0218 00:52:29.815578 4791 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b8c638bd-8bce-4fc7-9289-312eee3c2be4-bundle\") pod \"839821d02b67fa352b5f2f2742cf71374a58067197cd468c715f3fd4e7c76qd\" (UID: \"b8c638bd-8bce-4fc7-9289-312eee3c2be4\") " pod="openstack-operators/839821d02b67fa352b5f2f2742cf71374a58067197cd468c715f3fd4e7c76qd" Feb 18 00:52:29 crc kubenswrapper[4791]: I0218 00:52:29.815607 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cj7sl\" (UniqueName: \"kubernetes.io/projected/b8c638bd-8bce-4fc7-9289-312eee3c2be4-kube-api-access-cj7sl\") pod \"839821d02b67fa352b5f2f2742cf71374a58067197cd468c715f3fd4e7c76qd\" (UID: \"b8c638bd-8bce-4fc7-9289-312eee3c2be4\") " pod="openstack-operators/839821d02b67fa352b5f2f2742cf71374a58067197cd468c715f3fd4e7c76qd" Feb 18 00:52:29 crc kubenswrapper[4791]: I0218 00:52:29.816438 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b8c638bd-8bce-4fc7-9289-312eee3c2be4-util\") pod \"839821d02b67fa352b5f2f2742cf71374a58067197cd468c715f3fd4e7c76qd\" (UID: \"b8c638bd-8bce-4fc7-9289-312eee3c2be4\") " pod="openstack-operators/839821d02b67fa352b5f2f2742cf71374a58067197cd468c715f3fd4e7c76qd" Feb 18 00:52:29 crc kubenswrapper[4791]: I0218 00:52:29.816715 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b8c638bd-8bce-4fc7-9289-312eee3c2be4-bundle\") pod \"839821d02b67fa352b5f2f2742cf71374a58067197cd468c715f3fd4e7c76qd\" (UID: \"b8c638bd-8bce-4fc7-9289-312eee3c2be4\") " pod="openstack-operators/839821d02b67fa352b5f2f2742cf71374a58067197cd468c715f3fd4e7c76qd" Feb 18 00:52:29 crc kubenswrapper[4791]: I0218 00:52:29.835250 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cj7sl\" (UniqueName: \"kubernetes.io/projected/b8c638bd-8bce-4fc7-9289-312eee3c2be4-kube-api-access-cj7sl\") pod \"839821d02b67fa352b5f2f2742cf71374a58067197cd468c715f3fd4e7c76qd\" (UID: \"b8c638bd-8bce-4fc7-9289-312eee3c2be4\") " pod="openstack-operators/839821d02b67fa352b5f2f2742cf71374a58067197cd468c715f3fd4e7c76qd" Feb 18 00:52:29 crc kubenswrapper[4791]: I0218 00:52:29.965224 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/839821d02b67fa352b5f2f2742cf71374a58067197cd468c715f3fd4e7c76qd" Feb 18 00:52:30 crc kubenswrapper[4791]: I0218 00:52:30.205967 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/839821d02b67fa352b5f2f2742cf71374a58067197cd468c715f3fd4e7c76qd"] Feb 18 00:52:30 crc kubenswrapper[4791]: I0218 00:52:30.982087 4791 generic.go:334] "Generic (PLEG): container finished" podID="b8c638bd-8bce-4fc7-9289-312eee3c2be4" containerID="8c2a3ecb7d5aed0509441bbd1ecf143e9c989f117f125d7eafc34096eb0efc30" exitCode=0 Feb 18 00:52:30 crc kubenswrapper[4791]: I0218 00:52:30.982181 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/839821d02b67fa352b5f2f2742cf71374a58067197cd468c715f3fd4e7c76qd" event={"ID":"b8c638bd-8bce-4fc7-9289-312eee3c2be4","Type":"ContainerDied","Data":"8c2a3ecb7d5aed0509441bbd1ecf143e9c989f117f125d7eafc34096eb0efc30"} Feb 18 00:52:30 crc kubenswrapper[4791]: I0218 00:52:30.982223 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/839821d02b67fa352b5f2f2742cf71374a58067197cd468c715f3fd4e7c76qd" event={"ID":"b8c638bd-8bce-4fc7-9289-312eee3c2be4","Type":"ContainerStarted","Data":"783ecaf6b559cce36799ef9bfe435770ec08b9f669a94b8df61704372ef69282"} Feb 18 00:52:31 crc kubenswrapper[4791]: I0218 00:52:31.992369 4791 generic.go:334] "Generic (PLEG): container finished" podID="b8c638bd-8bce-4fc7-9289-312eee3c2be4" containerID="f803a3cbd37744d0c762275c934161b83ba6f8af35d90ae3225925bd590f170f" exitCode=0 Feb 18 00:52:31 crc kubenswrapper[4791]: I0218 00:52:31.992433 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/839821d02b67fa352b5f2f2742cf71374a58067197cd468c715f3fd4e7c76qd" event={"ID":"b8c638bd-8bce-4fc7-9289-312eee3c2be4","Type":"ContainerDied","Data":"f803a3cbd37744d0c762275c934161b83ba6f8af35d90ae3225925bd590f170f"} Feb 18 00:52:33 crc kubenswrapper[4791]: I0218 00:52:33.003361 4791 generic.go:334] "Generic (PLEG): container finished" podID="b8c638bd-8bce-4fc7-9289-312eee3c2be4" containerID="181579ee15b7ca78d1b4519cf681eab326fb4ccaf2977f6923c198d7b151fd24" exitCode=0 Feb 18 00:52:33 crc kubenswrapper[4791]: I0218 00:52:33.003759 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/839821d02b67fa352b5f2f2742cf71374a58067197cd468c715f3fd4e7c76qd" event={"ID":"b8c638bd-8bce-4fc7-9289-312eee3c2be4","Type":"ContainerDied","Data":"181579ee15b7ca78d1b4519cf681eab326fb4ccaf2977f6923c198d7b151fd24"} Feb 18 00:52:34 crc kubenswrapper[4791]: I0218 00:52:34.436570 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/839821d02b67fa352b5f2f2742cf71374a58067197cd468c715f3fd4e7c76qd" Feb 18 00:52:34 crc kubenswrapper[4791]: I0218 00:52:34.595669 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b8c638bd-8bce-4fc7-9289-312eee3c2be4-bundle\") pod \"b8c638bd-8bce-4fc7-9289-312eee3c2be4\" (UID: \"b8c638bd-8bce-4fc7-9289-312eee3c2be4\") " Feb 18 00:52:34 crc kubenswrapper[4791]: I0218 00:52:34.595886 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b8c638bd-8bce-4fc7-9289-312eee3c2be4-util\") pod \"b8c638bd-8bce-4fc7-9289-312eee3c2be4\" (UID: \"b8c638bd-8bce-4fc7-9289-312eee3c2be4\") " Feb 18 00:52:34 crc kubenswrapper[4791]: I0218 00:52:34.595932 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cj7sl\" (UniqueName: \"kubernetes.io/projected/b8c638bd-8bce-4fc7-9289-312eee3c2be4-kube-api-access-cj7sl\") pod \"b8c638bd-8bce-4fc7-9289-312eee3c2be4\" (UID: \"b8c638bd-8bce-4fc7-9289-312eee3c2be4\") " Feb 18 00:52:34 crc kubenswrapper[4791]: I0218 00:52:34.596348 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b8c638bd-8bce-4fc7-9289-312eee3c2be4-bundle" (OuterVolumeSpecName: "bundle") pod "b8c638bd-8bce-4fc7-9289-312eee3c2be4" (UID: "b8c638bd-8bce-4fc7-9289-312eee3c2be4"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:52:34 crc kubenswrapper[4791]: I0218 00:52:34.602315 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b8c638bd-8bce-4fc7-9289-312eee3c2be4-kube-api-access-cj7sl" (OuterVolumeSpecName: "kube-api-access-cj7sl") pod "b8c638bd-8bce-4fc7-9289-312eee3c2be4" (UID: "b8c638bd-8bce-4fc7-9289-312eee3c2be4"). InnerVolumeSpecName "kube-api-access-cj7sl". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:52:34 crc kubenswrapper[4791]: I0218 00:52:34.614885 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b8c638bd-8bce-4fc7-9289-312eee3c2be4-util" (OuterVolumeSpecName: "util") pod "b8c638bd-8bce-4fc7-9289-312eee3c2be4" (UID: "b8c638bd-8bce-4fc7-9289-312eee3c2be4"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:52:34 crc kubenswrapper[4791]: I0218 00:52:34.697457 4791 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b8c638bd-8bce-4fc7-9289-312eee3c2be4-util\") on node \"crc\" DevicePath \"\"" Feb 18 00:52:34 crc kubenswrapper[4791]: I0218 00:52:34.697774 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cj7sl\" (UniqueName: \"kubernetes.io/projected/b8c638bd-8bce-4fc7-9289-312eee3c2be4-kube-api-access-cj7sl\") on node \"crc\" DevicePath \"\"" Feb 18 00:52:34 crc kubenswrapper[4791]: I0218 00:52:34.697875 4791 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b8c638bd-8bce-4fc7-9289-312eee3c2be4-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:52:35 crc kubenswrapper[4791]: I0218 00:52:35.029083 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/839821d02b67fa352b5f2f2742cf71374a58067197cd468c715f3fd4e7c76qd" event={"ID":"b8c638bd-8bce-4fc7-9289-312eee3c2be4","Type":"ContainerDied","Data":"783ecaf6b559cce36799ef9bfe435770ec08b9f669a94b8df61704372ef69282"} Feb 18 00:52:35 crc kubenswrapper[4791]: I0218 00:52:35.029140 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="783ecaf6b559cce36799ef9bfe435770ec08b9f669a94b8df61704372ef69282" Feb 18 00:52:35 crc kubenswrapper[4791]: I0218 00:52:35.029564 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/839821d02b67fa352b5f2f2742cf71374a58067197cd468c715f3fd4e7c76qd" Feb 18 00:52:41 crc kubenswrapper[4791]: I0218 00:52:41.869979 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-init-69ff8ccd5-7nmcp"] Feb 18 00:52:41 crc kubenswrapper[4791]: E0218 00:52:41.870970 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8c638bd-8bce-4fc7-9289-312eee3c2be4" containerName="extract" Feb 18 00:52:41 crc kubenswrapper[4791]: I0218 00:52:41.870987 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8c638bd-8bce-4fc7-9289-312eee3c2be4" containerName="extract" Feb 18 00:52:41 crc kubenswrapper[4791]: E0218 00:52:41.871008 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8c638bd-8bce-4fc7-9289-312eee3c2be4" containerName="pull" Feb 18 00:52:41 crc kubenswrapper[4791]: I0218 00:52:41.871015 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8c638bd-8bce-4fc7-9289-312eee3c2be4" containerName="pull" Feb 18 00:52:41 crc kubenswrapper[4791]: E0218 00:52:41.871032 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8c638bd-8bce-4fc7-9289-312eee3c2be4" containerName="util" Feb 18 00:52:41 crc kubenswrapper[4791]: I0218 00:52:41.871039 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8c638bd-8bce-4fc7-9289-312eee3c2be4" containerName="util" Feb 18 00:52:41 crc kubenswrapper[4791]: I0218 00:52:41.871245 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8c638bd-8bce-4fc7-9289-312eee3c2be4" containerName="extract" Feb 18 00:52:41 crc kubenswrapper[4791]: I0218 00:52:41.871890 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-69ff8ccd5-7nmcp" Feb 18 00:52:41 crc kubenswrapper[4791]: I0218 00:52:41.873386 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-init-dockercfg-k75db" Feb 18 00:52:41 crc kubenswrapper[4791]: I0218 00:52:41.901122 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-69ff8ccd5-7nmcp"] Feb 18 00:52:42 crc kubenswrapper[4791]: I0218 00:52:42.017598 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9sbgb\" (UniqueName: \"kubernetes.io/projected/acd3a85c-bdae-4262-a8a0-3da693230a86-kube-api-access-9sbgb\") pod \"openstack-operator-controller-init-69ff8ccd5-7nmcp\" (UID: \"acd3a85c-bdae-4262-a8a0-3da693230a86\") " pod="openstack-operators/openstack-operator-controller-init-69ff8ccd5-7nmcp" Feb 18 00:52:42 crc kubenswrapper[4791]: I0218 00:52:42.118882 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9sbgb\" (UniqueName: \"kubernetes.io/projected/acd3a85c-bdae-4262-a8a0-3da693230a86-kube-api-access-9sbgb\") pod \"openstack-operator-controller-init-69ff8ccd5-7nmcp\" (UID: \"acd3a85c-bdae-4262-a8a0-3da693230a86\") " pod="openstack-operators/openstack-operator-controller-init-69ff8ccd5-7nmcp" Feb 18 00:52:42 crc kubenswrapper[4791]: I0218 00:52:42.140645 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9sbgb\" (UniqueName: \"kubernetes.io/projected/acd3a85c-bdae-4262-a8a0-3da693230a86-kube-api-access-9sbgb\") pod \"openstack-operator-controller-init-69ff8ccd5-7nmcp\" (UID: \"acd3a85c-bdae-4262-a8a0-3da693230a86\") " pod="openstack-operators/openstack-operator-controller-init-69ff8ccd5-7nmcp" Feb 18 00:52:42 crc kubenswrapper[4791]: I0218 00:52:42.192082 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-69ff8ccd5-7nmcp" Feb 18 00:52:42 crc kubenswrapper[4791]: I0218 00:52:42.659150 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-69ff8ccd5-7nmcp"] Feb 18 00:52:43 crc kubenswrapper[4791]: I0218 00:52:43.093771 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-69ff8ccd5-7nmcp" event={"ID":"acd3a85c-bdae-4262-a8a0-3da693230a86","Type":"ContainerStarted","Data":"9dc16a7bad8ed28ae8bc1f130d87e95465eb5dec379f91df3fefbf08bf6ff7f5"} Feb 18 00:52:47 crc kubenswrapper[4791]: I0218 00:52:47.130933 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-69ff8ccd5-7nmcp" event={"ID":"acd3a85c-bdae-4262-a8a0-3da693230a86","Type":"ContainerStarted","Data":"8f8108e32c7a3b0ae12ba5f55b7aeb677d1c71641bc52a988532cc5ffede72a8"} Feb 18 00:52:47 crc kubenswrapper[4791]: I0218 00:52:47.131555 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-init-69ff8ccd5-7nmcp" Feb 18 00:52:47 crc kubenswrapper[4791]: I0218 00:52:47.163924 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-init-69ff8ccd5-7nmcp" podStartSLOduration=2.214361476 podStartE2EDuration="6.163902852s" podCreationTimestamp="2026-02-18 00:52:41 +0000 UTC" firstStartedPulling="2026-02-18 00:52:42.670011107 +0000 UTC m=+1104.238024267" lastFinishedPulling="2026-02-18 00:52:46.619552473 +0000 UTC m=+1108.187565643" observedRunningTime="2026-02-18 00:52:47.157952749 +0000 UTC m=+1108.725965919" watchObservedRunningTime="2026-02-18 00:52:47.163902852 +0000 UTC m=+1108.731916022" Feb 18 00:52:52 crc kubenswrapper[4791]: I0218 00:52:52.194070 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-init-69ff8ccd5-7nmcp" Feb 18 00:53:29 crc kubenswrapper[4791]: I0218 00:53:29.856237 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-868647ff47-kcd8v"] Feb 18 00:53:29 crc kubenswrapper[4791]: I0218 00:53:29.857876 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-868647ff47-kcd8v" Feb 18 00:53:29 crc kubenswrapper[4791]: I0218 00:53:29.859821 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-qdzvs" Feb 18 00:53:29 crc kubenswrapper[4791]: I0218 00:53:29.861888 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-5d946d989d-pbsxj"] Feb 18 00:53:29 crc kubenswrapper[4791]: I0218 00:53:29.862937 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-5d946d989d-pbsxj" Feb 18 00:53:29 crc kubenswrapper[4791]: I0218 00:53:29.867788 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-zg29q" Feb 18 00:53:29 crc kubenswrapper[4791]: I0218 00:53:29.872809 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-868647ff47-kcd8v"] Feb 18 00:53:29 crc kubenswrapper[4791]: I0218 00:53:29.886143 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-6d8bf5c495-nzmq6"] Feb 18 00:53:29 crc kubenswrapper[4791]: I0218 00:53:29.891221 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-6d8bf5c495-nzmq6" Feb 18 00:53:29 crc kubenswrapper[4791]: I0218 00:53:29.894376 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-gjb69" Feb 18 00:53:29 crc kubenswrapper[4791]: I0218 00:53:29.901220 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-5d946d989d-pbsxj"] Feb 18 00:53:29 crc kubenswrapper[4791]: I0218 00:53:29.932114 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jxx8v\" (UniqueName: \"kubernetes.io/projected/189a189d-5801-4406-8450-1cff37f84bbb-kube-api-access-jxx8v\") pod \"barbican-operator-controller-manager-868647ff47-kcd8v\" (UID: \"189a189d-5801-4406-8450-1cff37f84bbb\") " pod="openstack-operators/barbican-operator-controller-manager-868647ff47-kcd8v" Feb 18 00:53:29 crc kubenswrapper[4791]: I0218 00:53:29.932246 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b8dxc\" (UniqueName: \"kubernetes.io/projected/e10a708b-16cd-467f-b166-00429da94123-kube-api-access-b8dxc\") pod \"cinder-operator-controller-manager-5d946d989d-pbsxj\" (UID: \"e10a708b-16cd-467f-b166-00429da94123\") " pod="openstack-operators/cinder-operator-controller-manager-5d946d989d-pbsxj" Feb 18 00:53:29 crc kubenswrapper[4791]: I0218 00:53:29.939998 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-77987464f4-k2x8h"] Feb 18 00:53:29 crc kubenswrapper[4791]: I0218 00:53:29.940978 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-77987464f4-k2x8h" Feb 18 00:53:29 crc kubenswrapper[4791]: I0218 00:53:29.943543 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-z9tkj" Feb 18 00:53:29 crc kubenswrapper[4791]: I0218 00:53:29.951302 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-6d8bf5c495-nzmq6"] Feb 18 00:53:29 crc kubenswrapper[4791]: I0218 00:53:29.979256 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-77987464f4-k2x8h"] Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.033713 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5gxfs\" (UniqueName: \"kubernetes.io/projected/e8318b6f-bf0c-447f-a43c-0ac54f9c60a4-kube-api-access-5gxfs\") pod \"designate-operator-controller-manager-6d8bf5c495-nzmq6\" (UID: \"e8318b6f-bf0c-447f-a43c-0ac54f9c60a4\") " pod="openstack-operators/designate-operator-controller-manager-6d8bf5c495-nzmq6" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.033938 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b8dxc\" (UniqueName: \"kubernetes.io/projected/e10a708b-16cd-467f-b166-00429da94123-kube-api-access-b8dxc\") pod \"cinder-operator-controller-manager-5d946d989d-pbsxj\" (UID: \"e10a708b-16cd-467f-b166-00429da94123\") " pod="openstack-operators/cinder-operator-controller-manager-5d946d989d-pbsxj" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.034059 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wn7bx\" (UniqueName: \"kubernetes.io/projected/b9ce1944-2b04-437c-9b3e-4cafb6d68ecf-kube-api-access-wn7bx\") pod \"glance-operator-controller-manager-77987464f4-k2x8h\" (UID: \"b9ce1944-2b04-437c-9b3e-4cafb6d68ecf\") " pod="openstack-operators/glance-operator-controller-manager-77987464f4-k2x8h" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.041488 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jxx8v\" (UniqueName: \"kubernetes.io/projected/189a189d-5801-4406-8450-1cff37f84bbb-kube-api-access-jxx8v\") pod \"barbican-operator-controller-manager-868647ff47-kcd8v\" (UID: \"189a189d-5801-4406-8450-1cff37f84bbb\") " pod="openstack-operators/barbican-operator-controller-manager-868647ff47-kcd8v" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.044330 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-69f49c598c-dhrvm"] Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.045806 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-69f49c598c-dhrvm" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.052039 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-tj6lm" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.079214 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-69f49c598c-dhrvm"] Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.089228 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-79d975b745-4xrps"] Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.090319 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-79d975b745-4xrps" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.097542 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-xdszg" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.097553 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b8dxc\" (UniqueName: \"kubernetes.io/projected/e10a708b-16cd-467f-b166-00429da94123-kube-api-access-b8dxc\") pod \"cinder-operator-controller-manager-5d946d989d-pbsxj\" (UID: \"e10a708b-16cd-467f-b166-00429da94123\") " pod="openstack-operators/cinder-operator-controller-manager-5d946d989d-pbsxj" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.097874 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5b9b8895d5-nq4q5"] Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.098601 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jxx8v\" (UniqueName: \"kubernetes.io/projected/189a189d-5801-4406-8450-1cff37f84bbb-kube-api-access-jxx8v\") pod \"barbican-operator-controller-manager-868647ff47-kcd8v\" (UID: \"189a189d-5801-4406-8450-1cff37f84bbb\") " pod="openstack-operators/barbican-operator-controller-manager-868647ff47-kcd8v" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.099151 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5b9b8895d5-nq4q5" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.103737 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.105225 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-79d975b745-4xrps"] Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.111739 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-drw56" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.141537 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5b9b8895d5-nq4q5"] Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.142658 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wn7bx\" (UniqueName: \"kubernetes.io/projected/b9ce1944-2b04-437c-9b3e-4cafb6d68ecf-kube-api-access-wn7bx\") pod \"glance-operator-controller-manager-77987464f4-k2x8h\" (UID: \"b9ce1944-2b04-437c-9b3e-4cafb6d68ecf\") " pod="openstack-operators/glance-operator-controller-manager-77987464f4-k2x8h" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.142702 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c0f7fb62-ebcb-4989-8913-8b4b488df740-cert\") pod \"infra-operator-controller-manager-79d975b745-4xrps\" (UID: \"c0f7fb62-ebcb-4989-8913-8b4b488df740\") " pod="openstack-operators/infra-operator-controller-manager-79d975b745-4xrps" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.142731 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pdhww\" (UniqueName: \"kubernetes.io/projected/c0f7fb62-ebcb-4989-8913-8b4b488df740-kube-api-access-pdhww\") pod \"infra-operator-controller-manager-79d975b745-4xrps\" (UID: \"c0f7fb62-ebcb-4989-8913-8b4b488df740\") " pod="openstack-operators/infra-operator-controller-manager-79d975b745-4xrps" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.142797 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5gxfs\" (UniqueName: \"kubernetes.io/projected/e8318b6f-bf0c-447f-a43c-0ac54f9c60a4-kube-api-access-5gxfs\") pod \"designate-operator-controller-manager-6d8bf5c495-nzmq6\" (UID: \"e8318b6f-bf0c-447f-a43c-0ac54f9c60a4\") " pod="openstack-operators/designate-operator-controller-manager-6d8bf5c495-nzmq6" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.142847 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2l2h4\" (UniqueName: \"kubernetes.io/projected/2f773ffa-c1bb-4096-b44d-b01e7d9158c3-kube-api-access-2l2h4\") pod \"heat-operator-controller-manager-69f49c598c-dhrvm\" (UID: \"2f773ffa-c1bb-4096-b44d-b01e7d9158c3\") " pod="openstack-operators/heat-operator-controller-manager-69f49c598c-dhrvm" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.151376 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-554564d7fc-265gr"] Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.152399 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-554564d7fc-265gr" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.159519 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-k74h4" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.166778 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5gxfs\" (UniqueName: \"kubernetes.io/projected/e8318b6f-bf0c-447f-a43c-0ac54f9c60a4-kube-api-access-5gxfs\") pod \"designate-operator-controller-manager-6d8bf5c495-nzmq6\" (UID: \"e8318b6f-bf0c-447f-a43c-0ac54f9c60a4\") " pod="openstack-operators/designate-operator-controller-manager-6d8bf5c495-nzmq6" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.179349 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wn7bx\" (UniqueName: \"kubernetes.io/projected/b9ce1944-2b04-437c-9b3e-4cafb6d68ecf-kube-api-access-wn7bx\") pod \"glance-operator-controller-manager-77987464f4-k2x8h\" (UID: \"b9ce1944-2b04-437c-9b3e-4cafb6d68ecf\") " pod="openstack-operators/glance-operator-controller-manager-77987464f4-k2x8h" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.185071 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-868647ff47-kcd8v" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.192532 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-5d946d989d-pbsxj" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.202063 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-554564d7fc-265gr"] Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.211091 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-6d8bf5c495-nzmq6" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.217124 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-b4d948c87-jqbhr"] Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.218583 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-b4d948c87-jqbhr" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.221006 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-vt2qv" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.254589 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mgzks\" (UniqueName: \"kubernetes.io/projected/e029392b-011d-4edb-84bf-851fb6e9828f-kube-api-access-mgzks\") pod \"ironic-operator-controller-manager-554564d7fc-265gr\" (UID: \"e029392b-011d-4edb-84bf-851fb6e9828f\") " pod="openstack-operators/ironic-operator-controller-manager-554564d7fc-265gr" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.254962 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c0f7fb62-ebcb-4989-8913-8b4b488df740-cert\") pod \"infra-operator-controller-manager-79d975b745-4xrps\" (UID: \"c0f7fb62-ebcb-4989-8913-8b4b488df740\") " pod="openstack-operators/infra-operator-controller-manager-79d975b745-4xrps" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.254983 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tntqs\" (UniqueName: \"kubernetes.io/projected/cd9b7e04-15d6-4e16-9e30-85bbd31605fa-kube-api-access-tntqs\") pod \"horizon-operator-controller-manager-5b9b8895d5-nq4q5\" (UID: \"cd9b7e04-15d6-4e16-9e30-85bbd31605fa\") " pod="openstack-operators/horizon-operator-controller-manager-5b9b8895d5-nq4q5" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.255009 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pdhww\" (UniqueName: \"kubernetes.io/projected/c0f7fb62-ebcb-4989-8913-8b4b488df740-kube-api-access-pdhww\") pod \"infra-operator-controller-manager-79d975b745-4xrps\" (UID: \"c0f7fb62-ebcb-4989-8913-8b4b488df740\") " pod="openstack-operators/infra-operator-controller-manager-79d975b745-4xrps" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.255075 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2l2h4\" (UniqueName: \"kubernetes.io/projected/2f773ffa-c1bb-4096-b44d-b01e7d9158c3-kube-api-access-2l2h4\") pod \"heat-operator-controller-manager-69f49c598c-dhrvm\" (UID: \"2f773ffa-c1bb-4096-b44d-b01e7d9158c3\") " pod="openstack-operators/heat-operator-controller-manager-69f49c598c-dhrvm" Feb 18 00:53:30 crc kubenswrapper[4791]: E0218 00:53:30.256494 4791 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Feb 18 00:53:30 crc kubenswrapper[4791]: E0218 00:53:30.256582 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c0f7fb62-ebcb-4989-8913-8b4b488df740-cert podName:c0f7fb62-ebcb-4989-8913-8b4b488df740 nodeName:}" failed. No retries permitted until 2026-02-18 00:53:30.756561319 +0000 UTC m=+1152.324574489 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/c0f7fb62-ebcb-4989-8913-8b4b488df740-cert") pod "infra-operator-controller-manager-79d975b745-4xrps" (UID: "c0f7fb62-ebcb-4989-8913-8b4b488df740") : secret "infra-operator-webhook-server-cert" not found Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.257356 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-b4d948c87-jqbhr"] Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.277068 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-77987464f4-k2x8h" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.277611 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-54f6768c69-mw585"] Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.278813 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-54f6768c69-mw585" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.285045 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-hxmmj" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.286711 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pdhww\" (UniqueName: \"kubernetes.io/projected/c0f7fb62-ebcb-4989-8913-8b4b488df740-kube-api-access-pdhww\") pod \"infra-operator-controller-manager-79d975b745-4xrps\" (UID: \"c0f7fb62-ebcb-4989-8913-8b4b488df740\") " pod="openstack-operators/infra-operator-controller-manager-79d975b745-4xrps" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.292264 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2l2h4\" (UniqueName: \"kubernetes.io/projected/2f773ffa-c1bb-4096-b44d-b01e7d9158c3-kube-api-access-2l2h4\") pod \"heat-operator-controller-manager-69f49c598c-dhrvm\" (UID: \"2f773ffa-c1bb-4096-b44d-b01e7d9158c3\") " pod="openstack-operators/heat-operator-controller-manager-69f49c598c-dhrvm" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.300579 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-54f6768c69-mw585"] Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.312335 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-6994f66f48-9sxdc"] Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.313887 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-6994f66f48-9sxdc" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.317892 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-mbfg2" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.320219 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-567668f5cf-2klv9"] Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.322287 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-567668f5cf-2klv9" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.327454 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-6jgb8" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.329209 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-6994f66f48-9sxdc"] Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.359561 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hw59n\" (UniqueName: \"kubernetes.io/projected/7ebed3cd-6f5f-4d8b-9b8c-d857c9ca0177-kube-api-access-hw59n\") pod \"manila-operator-controller-manager-54f6768c69-mw585\" (UID: \"7ebed3cd-6f5f-4d8b-9b8c-d857c9ca0177\") " pod="openstack-operators/manila-operator-controller-manager-54f6768c69-mw585" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.359625 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mgzks\" (UniqueName: \"kubernetes.io/projected/e029392b-011d-4edb-84bf-851fb6e9828f-kube-api-access-mgzks\") pod \"ironic-operator-controller-manager-554564d7fc-265gr\" (UID: \"e029392b-011d-4edb-84bf-851fb6e9828f\") " pod="openstack-operators/ironic-operator-controller-manager-554564d7fc-265gr" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.359657 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9w6zn\" (UniqueName: \"kubernetes.io/projected/4e11abdf-c683-4f45-a448-dcdfadbd9731-kube-api-access-9w6zn\") pod \"keystone-operator-controller-manager-b4d948c87-jqbhr\" (UID: \"4e11abdf-c683-4f45-a448-dcdfadbd9731\") " pod="openstack-operators/keystone-operator-controller-manager-b4d948c87-jqbhr" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.359730 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tntqs\" (UniqueName: \"kubernetes.io/projected/cd9b7e04-15d6-4e16-9e30-85bbd31605fa-kube-api-access-tntqs\") pod \"horizon-operator-controller-manager-5b9b8895d5-nq4q5\" (UID: \"cd9b7e04-15d6-4e16-9e30-85bbd31605fa\") " pod="openstack-operators/horizon-operator-controller-manager-5b9b8895d5-nq4q5" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.381458 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-69f49c598c-dhrvm" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.394462 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-64ddbf8bb-9xxkb"] Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.395145 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tntqs\" (UniqueName: \"kubernetes.io/projected/cd9b7e04-15d6-4e16-9e30-85bbd31605fa-kube-api-access-tntqs\") pod \"horizon-operator-controller-manager-5b9b8895d5-nq4q5\" (UID: \"cd9b7e04-15d6-4e16-9e30-85bbd31605fa\") " pod="openstack-operators/horizon-operator-controller-manager-5b9b8895d5-nq4q5" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.400683 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-64ddbf8bb-9xxkb" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.405679 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-sv6sm" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.418491 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mgzks\" (UniqueName: \"kubernetes.io/projected/e029392b-011d-4edb-84bf-851fb6e9828f-kube-api-access-mgzks\") pod \"ironic-operator-controller-manager-554564d7fc-265gr\" (UID: \"e029392b-011d-4edb-84bf-851fb6e9828f\") " pod="openstack-operators/ironic-operator-controller-manager-554564d7fc-265gr" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.418556 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-567668f5cf-2klv9"] Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.429713 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-64ddbf8bb-9xxkb"] Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.463957 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hw59n\" (UniqueName: \"kubernetes.io/projected/7ebed3cd-6f5f-4d8b-9b8c-d857c9ca0177-kube-api-access-hw59n\") pod \"manila-operator-controller-manager-54f6768c69-mw585\" (UID: \"7ebed3cd-6f5f-4d8b-9b8c-d857c9ca0177\") " pod="openstack-operators/manila-operator-controller-manager-54f6768c69-mw585" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.464018 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9w6zn\" (UniqueName: \"kubernetes.io/projected/4e11abdf-c683-4f45-a448-dcdfadbd9731-kube-api-access-9w6zn\") pod \"keystone-operator-controller-manager-b4d948c87-jqbhr\" (UID: \"4e11abdf-c683-4f45-a448-dcdfadbd9731\") " pod="openstack-operators/keystone-operator-controller-manager-b4d948c87-jqbhr" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.464041 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-499fw\" (UniqueName: \"kubernetes.io/projected/f1db1f83-f5b1-4c8d-af64-92816b1aa96d-kube-api-access-499fw\") pod \"nova-operator-controller-manager-567668f5cf-2klv9\" (UID: \"f1db1f83-f5b1-4c8d-af64-92816b1aa96d\") " pod="openstack-operators/nova-operator-controller-manager-567668f5cf-2klv9" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.464103 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-54zdz\" (UniqueName: \"kubernetes.io/projected/af444709-16b7-4e86-bf27-b0f4bcbd07d6-kube-api-access-54zdz\") pod \"mariadb-operator-controller-manager-6994f66f48-9sxdc\" (UID: \"af444709-16b7-4e86-bf27-b0f4bcbd07d6\") " pod="openstack-operators/mariadb-operator-controller-manager-6994f66f48-9sxdc" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.464134 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hd9w7\" (UniqueName: \"kubernetes.io/projected/0864bcf0-8b89-4a98-b294-cac0ec858221-kube-api-access-hd9w7\") pod \"neutron-operator-controller-manager-64ddbf8bb-9xxkb\" (UID: \"0864bcf0-8b89-4a98-b294-cac0ec858221\") " pod="openstack-operators/neutron-operator-controller-manager-64ddbf8bb-9xxkb" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 
00:53:30.475465 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-69f8888797-gz2td"] Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.484747 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-69f8888797-gz2td" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.486433 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hw59n\" (UniqueName: \"kubernetes.io/projected/7ebed3cd-6f5f-4d8b-9b8c-d857c9ca0177-kube-api-access-hw59n\") pod \"manila-operator-controller-manager-54f6768c69-mw585\" (UID: \"7ebed3cd-6f5f-4d8b-9b8c-d857c9ca0177\") " pod="openstack-operators/manila-operator-controller-manager-54f6768c69-mw585" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.489514 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-69f8888797-gz2td"] Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.489820 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-5ms69" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.495548 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-d44cf6b75-97ckn"] Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.497023 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-d44cf6b75-97ckn" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.498604 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9w6zn\" (UniqueName: \"kubernetes.io/projected/4e11abdf-c683-4f45-a448-dcdfadbd9731-kube-api-access-9w6zn\") pod \"keystone-operator-controller-manager-b4d948c87-jqbhr\" (UID: \"4e11abdf-c683-4f45-a448-dcdfadbd9731\") " pod="openstack-operators/keystone-operator-controller-manager-b4d948c87-jqbhr" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.510969 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-d44cf6b75-97ckn"] Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.512835 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-pmw4m" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.519144 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-8497b45c89-47vp7"] Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.522304 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-8497b45c89-47vp7" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.525021 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-jmb42" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.532652 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp"] Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.533723 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.536007 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.536359 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-5jrkx" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.551922 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-8497b45c89-47vp7"] Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.562662 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp"] Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.565803 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-499fw\" (UniqueName: \"kubernetes.io/projected/f1db1f83-f5b1-4c8d-af64-92816b1aa96d-kube-api-access-499fw\") pod \"nova-operator-controller-manager-567668f5cf-2klv9\" (UID: \"f1db1f83-f5b1-4c8d-af64-92816b1aa96d\") " pod="openstack-operators/nova-operator-controller-manager-567668f5cf-2klv9" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.565860 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-trl5f\" (UniqueName: \"kubernetes.io/projected/be30d6a9-7cd4-482d-a12d-e21de55366c1-kube-api-access-trl5f\") pod \"octavia-operator-controller-manager-69f8888797-gz2td\" (UID: \"be30d6a9-7cd4-482d-a12d-e21de55366c1\") " pod="openstack-operators/octavia-operator-controller-manager-69f8888797-gz2td" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.565903 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-54zdz\" (UniqueName: \"kubernetes.io/projected/af444709-16b7-4e86-bf27-b0f4bcbd07d6-kube-api-access-54zdz\") pod \"mariadb-operator-controller-manager-6994f66f48-9sxdc\" (UID: \"af444709-16b7-4e86-bf27-b0f4bcbd07d6\") " pod="openstack-operators/mariadb-operator-controller-manager-6994f66f48-9sxdc" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.565937 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hd9w7\" (UniqueName: \"kubernetes.io/projected/0864bcf0-8b89-4a98-b294-cac0ec858221-kube-api-access-hd9w7\") pod \"neutron-operator-controller-manager-64ddbf8bb-9xxkb\" (UID: \"0864bcf0-8b89-4a98-b294-cac0ec858221\") " pod="openstack-operators/neutron-operator-controller-manager-64ddbf8bb-9xxkb" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.566034 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wqqvr\" (UniqueName: \"kubernetes.io/projected/2c9de6b9-8cd5-4082-b89e-88958f7cb27e-kube-api-access-wqqvr\") pod \"ovn-operator-controller-manager-d44cf6b75-97ckn\" (UID: \"2c9de6b9-8cd5-4082-b89e-88958f7cb27e\") " pod="openstack-operators/ovn-operator-controller-manager-d44cf6b75-97ckn" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.583141 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-68f46476f-5tvb7"] Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.584249 4791 util.go:30] "No sandbox 
for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-68f46476f-5tvb7" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.599753 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-xrzts" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.601743 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-554564d7fc-265gr" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.602739 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5b9b8895d5-nq4q5" Feb 18 00:53:30 crc kubenswrapper[4791]: I0218 00:53:30.605727 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-68f46476f-5tvb7"] Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.267566 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hd9w7\" (UniqueName: \"kubernetes.io/projected/0864bcf0-8b89-4a98-b294-cac0ec858221-kube-api-access-hd9w7\") pod \"neutron-operator-controller-manager-64ddbf8bb-9xxkb\" (UID: \"0864bcf0-8b89-4a98-b294-cac0ec858221\") " pod="openstack-operators/neutron-operator-controller-manager-64ddbf8bb-9xxkb" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.272604 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-499fw\" (UniqueName: \"kubernetes.io/projected/f1db1f83-f5b1-4c8d-af64-92816b1aa96d-kube-api-access-499fw\") pod \"nova-operator-controller-manager-567668f5cf-2klv9\" (UID: \"f1db1f83-f5b1-4c8d-af64-92816b1aa96d\") " pod="openstack-operators/nova-operator-controller-manager-567668f5cf-2klv9" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.273464 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-54zdz\" (UniqueName: \"kubernetes.io/projected/af444709-16b7-4e86-bf27-b0f4bcbd07d6-kube-api-access-54zdz\") pod \"mariadb-operator-controller-manager-6994f66f48-9sxdc\" (UID: \"af444709-16b7-4e86-bf27-b0f4bcbd07d6\") " pod="openstack-operators/mariadb-operator-controller-manager-6994f66f48-9sxdc" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.397510 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-54f6768c69-mw585" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.398691 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-64ddbf8bb-9xxkb" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.399036 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-b4d948c87-jqbhr" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.409851 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wqqvr\" (UniqueName: \"kubernetes.io/projected/2c9de6b9-8cd5-4082-b89e-88958f7cb27e-kube-api-access-wqqvr\") pod \"ovn-operator-controller-manager-d44cf6b75-97ckn\" (UID: \"2c9de6b9-8cd5-4082-b89e-88958f7cb27e\") " pod="openstack-operators/ovn-operator-controller-manager-d44cf6b75-97ckn" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.409972 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2zvfw\" (UniqueName: \"kubernetes.io/projected/35eeab77-8059-4f0c-8742-3f72c2ffab54-kube-api-access-2zvfw\") pod \"placement-operator-controller-manager-8497b45c89-47vp7\" (UID: \"35eeab77-8059-4f0c-8742-3f72c2ffab54\") " pod="openstack-operators/placement-operator-controller-manager-8497b45c89-47vp7" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.410256 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-trl5f\" (UniqueName: \"kubernetes.io/projected/be30d6a9-7cd4-482d-a12d-e21de55366c1-kube-api-access-trl5f\") pod \"octavia-operator-controller-manager-69f8888797-gz2td\" (UID: \"be30d6a9-7cd4-482d-a12d-e21de55366c1\") " pod="openstack-operators/octavia-operator-controller-manager-69f8888797-gz2td" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.410303 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c0f7fb62-ebcb-4989-8913-8b4b488df740-cert\") pod \"infra-operator-controller-manager-79d975b745-4xrps\" (UID: \"c0f7fb62-ebcb-4989-8913-8b4b488df740\") " pod="openstack-operators/infra-operator-controller-manager-79d975b745-4xrps" Feb 18 00:53:31 crc kubenswrapper[4791]: E0218 00:53:31.410508 4791 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Feb 18 00:53:31 crc kubenswrapper[4791]: E0218 00:53:31.410568 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c0f7fb62-ebcb-4989-8913-8b4b488df740-cert podName:c0f7fb62-ebcb-4989-8913-8b4b488df740 nodeName:}" failed. No retries permitted until 2026-02-18 00:53:32.410552233 +0000 UTC m=+1153.978565403 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/c0f7fb62-ebcb-4989-8913-8b4b488df740-cert") pod "infra-operator-controller-manager-79d975b745-4xrps" (UID: "c0f7fb62-ebcb-4989-8913-8b4b488df740") : secret "infra-operator-webhook-server-cert" not found Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.441351 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-trl5f\" (UniqueName: \"kubernetes.io/projected/be30d6a9-7cd4-482d-a12d-e21de55366c1-kube-api-access-trl5f\") pod \"octavia-operator-controller-manager-69f8888797-gz2td\" (UID: \"be30d6a9-7cd4-482d-a12d-e21de55366c1\") " pod="openstack-operators/octavia-operator-controller-manager-69f8888797-gz2td" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.443266 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wqqvr\" (UniqueName: \"kubernetes.io/projected/2c9de6b9-8cd5-4082-b89e-88958f7cb27e-kube-api-access-wqqvr\") pod \"ovn-operator-controller-manager-d44cf6b75-97ckn\" (UID: \"2c9de6b9-8cd5-4082-b89e-88958f7cb27e\") " pod="openstack-operators/ovn-operator-controller-manager-d44cf6b75-97ckn" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.444312 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-d44cf6b75-97ckn" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.460897 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-c6f9cb8b-mnkq9"] Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.461831 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-c6f9cb8b-mnkq9"] Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.461909 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-c6f9cb8b-mnkq9" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.464655 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-bqcj9" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.503628 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-7866795846-bx4hv"] Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.505555 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-7866795846-bx4hv" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.511961 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-99mg5" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.521616 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vsdtp\" (UniqueName: \"kubernetes.io/projected/a3e4d5ea-ce2c-4d6b-93bd-f67a88f996a6-kube-api-access-vsdtp\") pod \"swift-operator-controller-manager-68f46476f-5tvb7\" (UID: \"a3e4d5ea-ce2c-4d6b-93bd-f67a88f996a6\") " pod="openstack-operators/swift-operator-controller-manager-68f46476f-5tvb7" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.521679 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v8gw4\" (UniqueName: \"kubernetes.io/projected/ea2ae9de-8373-4a24-bbe8-2308ecc8dad2-kube-api-access-v8gw4\") pod \"telemetry-operator-controller-manager-c6f9cb8b-mnkq9\" (UID: \"ea2ae9de-8373-4a24-bbe8-2308ecc8dad2\") " pod="openstack-operators/telemetry-operator-controller-manager-c6f9cb8b-mnkq9" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.531549 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2zvfw\" (UniqueName: \"kubernetes.io/projected/35eeab77-8059-4f0c-8742-3f72c2ffab54-kube-api-access-2zvfw\") pod \"placement-operator-controller-manager-8497b45c89-47vp7\" (UID: \"35eeab77-8059-4f0c-8742-3f72c2ffab54\") " pod="openstack-operators/placement-operator-controller-manager-8497b45c89-47vp7" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.531636 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l746v\" (UniqueName: \"kubernetes.io/projected/be246d6d-b866-4edb-bb80-3c84e27f0caa-kube-api-access-l746v\") pod \"openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp\" (UID: \"be246d6d-b866-4edb-bb80-3c84e27f0caa\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.540423 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t4tgc\" (UniqueName: \"kubernetes.io/projected/819eaf55-1011-4861-bca0-7aecb14098c3-kube-api-access-t4tgc\") pod \"test-operator-controller-manager-7866795846-bx4hv\" (UID: \"819eaf55-1011-4861-bca0-7aecb14098c3\") " pod="openstack-operators/test-operator-controller-manager-7866795846-bx4hv" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.540638 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/be246d6d-b866-4edb-bb80-3c84e27f0caa-cert\") pod \"openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp\" (UID: \"be246d6d-b866-4edb-bb80-3c84e27f0caa\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.549782 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-6994f66f48-9sxdc" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.559414 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-567668f5cf-2klv9" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.565612 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-7866795846-bx4hv"] Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.595894 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2zvfw\" (UniqueName: \"kubernetes.io/projected/35eeab77-8059-4f0c-8742-3f72c2ffab54-kube-api-access-2zvfw\") pod \"placement-operator-controller-manager-8497b45c89-47vp7\" (UID: \"35eeab77-8059-4f0c-8742-3f72c2ffab54\") " pod="openstack-operators/placement-operator-controller-manager-8497b45c89-47vp7" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.598723 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-5db88f68c-sjggr"] Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.601537 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-5db88f68c-sjggr" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.603113 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-9fdgv" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.608000 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-5db88f68c-sjggr"] Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.642449 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vsdtp\" (UniqueName: \"kubernetes.io/projected/a3e4d5ea-ce2c-4d6b-93bd-f67a88f996a6-kube-api-access-vsdtp\") pod \"swift-operator-controller-manager-68f46476f-5tvb7\" (UID: \"a3e4d5ea-ce2c-4d6b-93bd-f67a88f996a6\") " pod="openstack-operators/swift-operator-controller-manager-68f46476f-5tvb7" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.642919 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v8gw4\" (UniqueName: \"kubernetes.io/projected/ea2ae9de-8373-4a24-bbe8-2308ecc8dad2-kube-api-access-v8gw4\") pod \"telemetry-operator-controller-manager-c6f9cb8b-mnkq9\" (UID: \"ea2ae9de-8373-4a24-bbe8-2308ecc8dad2\") " pod="openstack-operators/telemetry-operator-controller-manager-c6f9cb8b-mnkq9" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.643043 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l746v\" (UniqueName: \"kubernetes.io/projected/be246d6d-b866-4edb-bb80-3c84e27f0caa-kube-api-access-l746v\") pod \"openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp\" (UID: \"be246d6d-b866-4edb-bb80-3c84e27f0caa\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.645447 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t4tgc\" (UniqueName: \"kubernetes.io/projected/819eaf55-1011-4861-bca0-7aecb14098c3-kube-api-access-t4tgc\") pod \"test-operator-controller-manager-7866795846-bx4hv\" (UID: \"819eaf55-1011-4861-bca0-7aecb14098c3\") " pod="openstack-operators/test-operator-controller-manager-7866795846-bx4hv" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.646859 4791 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"cert\" (UniqueName: \"kubernetes.io/secret/be246d6d-b866-4edb-bb80-3c84e27f0caa-cert\") pod \"openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp\" (UID: \"be246d6d-b866-4edb-bb80-3c84e27f0caa\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp" Feb 18 00:53:31 crc kubenswrapper[4791]: E0218 00:53:31.647071 4791 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 18 00:53:31 crc kubenswrapper[4791]: E0218 00:53:31.647133 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/be246d6d-b866-4edb-bb80-3c84e27f0caa-cert podName:be246d6d-b866-4edb-bb80-3c84e27f0caa nodeName:}" failed. No retries permitted until 2026-02-18 00:53:32.147117073 +0000 UTC m=+1153.715130243 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/be246d6d-b866-4edb-bb80-3c84e27f0caa-cert") pod "openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp" (UID: "be246d6d-b866-4edb-bb80-3c84e27f0caa") : secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.647084 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f6kr5\" (UniqueName: \"kubernetes.io/projected/38f179b0-7f33-4d39-aa94-2ca4292ac94e-kube-api-access-f6kr5\") pod \"watcher-operator-controller-manager-5db88f68c-sjggr\" (UID: \"38f179b0-7f33-4d39-aa94-2ca4292ac94e\") " pod="openstack-operators/watcher-operator-controller-manager-5db88f68c-sjggr" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.663078 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v8gw4\" (UniqueName: \"kubernetes.io/projected/ea2ae9de-8373-4a24-bbe8-2308ecc8dad2-kube-api-access-v8gw4\") pod \"telemetry-operator-controller-manager-c6f9cb8b-mnkq9\" (UID: \"ea2ae9de-8373-4a24-bbe8-2308ecc8dad2\") " pod="openstack-operators/telemetry-operator-controller-manager-c6f9cb8b-mnkq9" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.665778 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t4tgc\" (UniqueName: \"kubernetes.io/projected/819eaf55-1011-4861-bca0-7aecb14098c3-kube-api-access-t4tgc\") pod \"test-operator-controller-manager-7866795846-bx4hv\" (UID: \"819eaf55-1011-4861-bca0-7aecb14098c3\") " pod="openstack-operators/test-operator-controller-manager-7866795846-bx4hv" Feb 18 00:53:31 crc kubenswrapper[4791]: W0218 00:53:31.667564 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod189a189d_5801_4406_8450_1cff37f84bbb.slice/crio-5251be6c098cfa817cd9c718ef23b3bc7823c65ffe25b9a5375f6f9487f38d31 WatchSource:0}: Error finding container 5251be6c098cfa817cd9c718ef23b3bc7823c65ffe25b9a5375f6f9487f38d31: Status 404 returned error can't find the container with id 5251be6c098cfa817cd9c718ef23b3bc7823c65ffe25b9a5375f6f9487f38d31 Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.669467 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l746v\" (UniqueName: \"kubernetes.io/projected/be246d6d-b866-4edb-bb80-3c84e27f0caa-kube-api-access-l746v\") pod \"openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp\" (UID: \"be246d6d-b866-4edb-bb80-3c84e27f0caa\") " 
pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.680866 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-669759659c-xt6gk"] Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.682033 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-669759659c-xt6gk" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.682151 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vsdtp\" (UniqueName: \"kubernetes.io/projected/a3e4d5ea-ce2c-4d6b-93bd-f67a88f996a6-kube-api-access-vsdtp\") pod \"swift-operator-controller-manager-68f46476f-5tvb7\" (UID: \"a3e4d5ea-ce2c-4d6b-93bd-f67a88f996a6\") " pod="openstack-operators/swift-operator-controller-manager-68f46476f-5tvb7" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.685128 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-nv8f9" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.685435 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.685638 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.696331 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-669759659c-xt6gk"] Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.707024 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wl9hf"] Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.708281 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wl9hf" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.714532 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-dbsq4" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.716030 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wl9hf"] Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.720107 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-69f8888797-gz2td" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.738297 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-5d946d989d-pbsxj"] Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.764129 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghnbb\" (UniqueName: \"kubernetes.io/projected/692a8f45-9351-4d64-9571-20f46a3bd0ba-kube-api-access-ghnbb\") pod \"rabbitmq-cluster-operator-manager-668c99d594-wl9hf\" (UID: \"692a8f45-9351-4d64-9571-20f46a3bd0ba\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wl9hf" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.764578 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-metrics-certs\") pod \"openstack-operator-controller-manager-669759659c-xt6gk\" (UID: \"0d602aa9-6246-4b05-8a6c-7d3cfb607a36\") " pod="openstack-operators/openstack-operator-controller-manager-669759659c-xt6gk" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.764622 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-webhook-certs\") pod \"openstack-operator-controller-manager-669759659c-xt6gk\" (UID: \"0d602aa9-6246-4b05-8a6c-7d3cfb607a36\") " pod="openstack-operators/openstack-operator-controller-manager-669759659c-xt6gk" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.764663 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bcgvz\" (UniqueName: \"kubernetes.io/projected/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-kube-api-access-bcgvz\") pod \"openstack-operator-controller-manager-669759659c-xt6gk\" (UID: \"0d602aa9-6246-4b05-8a6c-7d3cfb607a36\") " pod="openstack-operators/openstack-operator-controller-manager-669759659c-xt6gk" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.764776 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f6kr5\" (UniqueName: \"kubernetes.io/projected/38f179b0-7f33-4d39-aa94-2ca4292ac94e-kube-api-access-f6kr5\") pod \"watcher-operator-controller-manager-5db88f68c-sjggr\" (UID: \"38f179b0-7f33-4d39-aa94-2ca4292ac94e\") " pod="openstack-operators/watcher-operator-controller-manager-5db88f68c-sjggr" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.770554 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-8497b45c89-47vp7" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.789646 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f6kr5\" (UniqueName: \"kubernetes.io/projected/38f179b0-7f33-4d39-aa94-2ca4292ac94e-kube-api-access-f6kr5\") pod \"watcher-operator-controller-manager-5db88f68c-sjggr\" (UID: \"38f179b0-7f33-4d39-aa94-2ca4292ac94e\") " pod="openstack-operators/watcher-operator-controller-manager-5db88f68c-sjggr" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.809220 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-868647ff47-kcd8v"] Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.848605 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-68f46476f-5tvb7" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.866583 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-webhook-certs\") pod \"openstack-operator-controller-manager-669759659c-xt6gk\" (UID: \"0d602aa9-6246-4b05-8a6c-7d3cfb607a36\") " pod="openstack-operators/openstack-operator-controller-manager-669759659c-xt6gk" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.866675 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bcgvz\" (UniqueName: \"kubernetes.io/projected/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-kube-api-access-bcgvz\") pod \"openstack-operator-controller-manager-669759659c-xt6gk\" (UID: \"0d602aa9-6246-4b05-8a6c-7d3cfb607a36\") " pod="openstack-operators/openstack-operator-controller-manager-669759659c-xt6gk" Feb 18 00:53:31 crc kubenswrapper[4791]: E0218 00:53:31.866763 4791 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Feb 18 00:53:31 crc kubenswrapper[4791]: E0218 00:53:31.866836 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-webhook-certs podName:0d602aa9-6246-4b05-8a6c-7d3cfb607a36 nodeName:}" failed. No retries permitted until 2026-02-18 00:53:32.366818535 +0000 UTC m=+1153.934831705 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-webhook-certs") pod "openstack-operator-controller-manager-669759659c-xt6gk" (UID: "0d602aa9-6246-4b05-8a6c-7d3cfb607a36") : secret "webhook-server-cert" not found Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.866863 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ghnbb\" (UniqueName: \"kubernetes.io/projected/692a8f45-9351-4d64-9571-20f46a3bd0ba-kube-api-access-ghnbb\") pod \"rabbitmq-cluster-operator-manager-668c99d594-wl9hf\" (UID: \"692a8f45-9351-4d64-9571-20f46a3bd0ba\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wl9hf" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.867034 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-metrics-certs\") pod \"openstack-operator-controller-manager-669759659c-xt6gk\" (UID: \"0d602aa9-6246-4b05-8a6c-7d3cfb607a36\") " pod="openstack-operators/openstack-operator-controller-manager-669759659c-xt6gk" Feb 18 00:53:31 crc kubenswrapper[4791]: E0218 00:53:31.867229 4791 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Feb 18 00:53:31 crc kubenswrapper[4791]: E0218 00:53:31.867252 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-metrics-certs podName:0d602aa9-6246-4b05-8a6c-7d3cfb607a36 nodeName:}" failed. No retries permitted until 2026-02-18 00:53:32.367245558 +0000 UTC m=+1153.935258728 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-metrics-certs") pod "openstack-operator-controller-manager-669759659c-xt6gk" (UID: "0d602aa9-6246-4b05-8a6c-7d3cfb607a36") : secret "metrics-server-cert" not found Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.884021 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bcgvz\" (UniqueName: \"kubernetes.io/projected/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-kube-api-access-bcgvz\") pod \"openstack-operator-controller-manager-669759659c-xt6gk\" (UID: \"0d602aa9-6246-4b05-8a6c-7d3cfb607a36\") " pod="openstack-operators/openstack-operator-controller-manager-669759659c-xt6gk" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.885833 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-6d8bf5c495-nzmq6"] Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.886262 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ghnbb\" (UniqueName: \"kubernetes.io/projected/692a8f45-9351-4d64-9571-20f46a3bd0ba-kube-api-access-ghnbb\") pod \"rabbitmq-cluster-operator-manager-668c99d594-wl9hf\" (UID: \"692a8f45-9351-4d64-9571-20f46a3bd0ba\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wl9hf" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.888259 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-c6f9cb8b-mnkq9" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.947793 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-7866795846-bx4hv" Feb 18 00:53:31 crc kubenswrapper[4791]: I0218 00:53:31.956463 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-5db88f68c-sjggr" Feb 18 00:53:32 crc kubenswrapper[4791]: I0218 00:53:32.046429 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wl9hf" Feb 18 00:53:32 crc kubenswrapper[4791]: I0218 00:53:32.174451 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/be246d6d-b866-4edb-bb80-3c84e27f0caa-cert\") pod \"openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp\" (UID: \"be246d6d-b866-4edb-bb80-3c84e27f0caa\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp" Feb 18 00:53:32 crc kubenswrapper[4791]: E0218 00:53:32.174668 4791 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 18 00:53:32 crc kubenswrapper[4791]: E0218 00:53:32.175306 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/be246d6d-b866-4edb-bb80-3c84e27f0caa-cert podName:be246d6d-b866-4edb-bb80-3c84e27f0caa nodeName:}" failed. No retries permitted until 2026-02-18 00:53:33.175256864 +0000 UTC m=+1154.743270034 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/be246d6d-b866-4edb-bb80-3c84e27f0caa-cert") pod "openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp" (UID: "be246d6d-b866-4edb-bb80-3c84e27f0caa") : secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 18 00:53:32 crc kubenswrapper[4791]: W0218 00:53:32.284877 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2f773ffa_c1bb_4096_b44d_b01e7d9158c3.slice/crio-2964ebc18690a592982410cf538e374e9c3d3f4a5dbae3330e52c0e5abe67d52 WatchSource:0}: Error finding container 2964ebc18690a592982410cf538e374e9c3d3f4a5dbae3330e52c0e5abe67d52: Status 404 returned error can't find the container with id 2964ebc18690a592982410cf538e374e9c3d3f4a5dbae3330e52c0e5abe67d52 Feb 18 00:53:32 crc kubenswrapper[4791]: I0218 00:53:32.285707 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5b9b8895d5-nq4q5"] Feb 18 00:53:32 crc kubenswrapper[4791]: I0218 00:53:32.294273 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-69f49c598c-dhrvm"] Feb 18 00:53:32 crc kubenswrapper[4791]: I0218 00:53:32.308427 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-b4d948c87-jqbhr"] Feb 18 00:53:32 crc kubenswrapper[4791]: W0218 00:53:32.310351 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb9ce1944_2b04_437c_9b3e_4cafb6d68ecf.slice/crio-5866c29e341843c7799544fb48f2dd38923ef585ed9e426a08e40363156d39c5 WatchSource:0}: Error finding container 5866c29e341843c7799544fb48f2dd38923ef585ed9e426a08e40363156d39c5: Status 404 returned error can't find the container with id 
5866c29e341843c7799544fb48f2dd38923ef585ed9e426a08e40363156d39c5 Feb 18 00:53:32 crc kubenswrapper[4791]: I0218 00:53:32.325364 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-77987464f4-k2x8h"] Feb 18 00:53:32 crc kubenswrapper[4791]: I0218 00:53:32.362598 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-64ddbf8bb-9xxkb"] Feb 18 00:53:32 crc kubenswrapper[4791]: W0218 00:53:32.369588 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0864bcf0_8b89_4a98_b294_cac0ec858221.slice/crio-ab1bfb05384c4ecaa80bc09f2e4957bcff3b3f06e78c25c3e5d699cd29b62f2c WatchSource:0}: Error finding container ab1bfb05384c4ecaa80bc09f2e4957bcff3b3f06e78c25c3e5d699cd29b62f2c: Status 404 returned error can't find the container with id ab1bfb05384c4ecaa80bc09f2e4957bcff3b3f06e78c25c3e5d699cd29b62f2c Feb 18 00:53:32 crc kubenswrapper[4791]: I0218 00:53:32.375071 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-554564d7fc-265gr"] Feb 18 00:53:32 crc kubenswrapper[4791]: I0218 00:53:32.378242 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-metrics-certs\") pod \"openstack-operator-controller-manager-669759659c-xt6gk\" (UID: \"0d602aa9-6246-4b05-8a6c-7d3cfb607a36\") " pod="openstack-operators/openstack-operator-controller-manager-669759659c-xt6gk" Feb 18 00:53:32 crc kubenswrapper[4791]: I0218 00:53:32.378308 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-webhook-certs\") pod \"openstack-operator-controller-manager-669759659c-xt6gk\" (UID: \"0d602aa9-6246-4b05-8a6c-7d3cfb607a36\") " pod="openstack-operators/openstack-operator-controller-manager-669759659c-xt6gk" Feb 18 00:53:32 crc kubenswrapper[4791]: E0218 00:53:32.378430 4791 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Feb 18 00:53:32 crc kubenswrapper[4791]: E0218 00:53:32.378463 4791 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Feb 18 00:53:32 crc kubenswrapper[4791]: E0218 00:53:32.378492 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-metrics-certs podName:0d602aa9-6246-4b05-8a6c-7d3cfb607a36 nodeName:}" failed. No retries permitted until 2026-02-18 00:53:33.378476529 +0000 UTC m=+1154.946489699 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-metrics-certs") pod "openstack-operator-controller-manager-669759659c-xt6gk" (UID: "0d602aa9-6246-4b05-8a6c-7d3cfb607a36") : secret "metrics-server-cert" not found Feb 18 00:53:32 crc kubenswrapper[4791]: E0218 00:53:32.378518 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-webhook-certs podName:0d602aa9-6246-4b05-8a6c-7d3cfb607a36 nodeName:}" failed. No retries permitted until 2026-02-18 00:53:33.37849958 +0000 UTC m=+1154.946512820 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-webhook-certs") pod "openstack-operator-controller-manager-669759659c-xt6gk" (UID: "0d602aa9-6246-4b05-8a6c-7d3cfb607a36") : secret "webhook-server-cert" not found Feb 18 00:53:32 crc kubenswrapper[4791]: I0218 00:53:32.481232 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c0f7fb62-ebcb-4989-8913-8b4b488df740-cert\") pod \"infra-operator-controller-manager-79d975b745-4xrps\" (UID: \"c0f7fb62-ebcb-4989-8913-8b4b488df740\") " pod="openstack-operators/infra-operator-controller-manager-79d975b745-4xrps" Feb 18 00:53:32 crc kubenswrapper[4791]: E0218 00:53:32.481840 4791 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Feb 18 00:53:32 crc kubenswrapper[4791]: E0218 00:53:32.481961 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c0f7fb62-ebcb-4989-8913-8b4b488df740-cert podName:c0f7fb62-ebcb-4989-8913-8b4b488df740 nodeName:}" failed. No retries permitted until 2026-02-18 00:53:34.481930528 +0000 UTC m=+1156.049943718 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/c0f7fb62-ebcb-4989-8913-8b4b488df740-cert") pod "infra-operator-controller-manager-79d975b745-4xrps" (UID: "c0f7fb62-ebcb-4989-8913-8b4b488df740") : secret "infra-operator-webhook-server-cert" not found Feb 18 00:53:32 crc kubenswrapper[4791]: I0218 00:53:32.626968 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-5d946d989d-pbsxj" event={"ID":"e10a708b-16cd-467f-b166-00429da94123","Type":"ContainerStarted","Data":"118ff55dd78b2172b98effbc3447c7b0e5d4827f8e024089ab2b0b1cef43118e"} Feb 18 00:53:32 crc kubenswrapper[4791]: I0218 00:53:32.628887 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-554564d7fc-265gr" event={"ID":"e029392b-011d-4edb-84bf-851fb6e9828f","Type":"ContainerStarted","Data":"52fb1b222fe70766f1dd3c8fe4a90f8c05acac58c44a7823d77f34957c47aba1"} Feb 18 00:53:32 crc kubenswrapper[4791]: I0218 00:53:32.631476 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-77987464f4-k2x8h" event={"ID":"b9ce1944-2b04-437c-9b3e-4cafb6d68ecf","Type":"ContainerStarted","Data":"5866c29e341843c7799544fb48f2dd38923ef585ed9e426a08e40363156d39c5"} Feb 18 00:53:32 crc kubenswrapper[4791]: I0218 00:53:32.633441 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-6d8bf5c495-nzmq6" event={"ID":"e8318b6f-bf0c-447f-a43c-0ac54f9c60a4","Type":"ContainerStarted","Data":"8d38f0019a55c52935d3eb02ae2c6bffdb9c90ce4aed11dda484d7fd343e7386"} Feb 18 00:53:32 crc kubenswrapper[4791]: I0218 00:53:32.634974 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-b4d948c87-jqbhr" event={"ID":"4e11abdf-c683-4f45-a448-dcdfadbd9731","Type":"ContainerStarted","Data":"0475b03e2f2831bb28dfe3a38293143849f6f89456f54039eca32ffaddce612d"} Feb 18 00:53:32 crc kubenswrapper[4791]: I0218 00:53:32.636293 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-64ddbf8bb-9xxkb" 
event={"ID":"0864bcf0-8b89-4a98-b294-cac0ec858221","Type":"ContainerStarted","Data":"ab1bfb05384c4ecaa80bc09f2e4957bcff3b3f06e78c25c3e5d699cd29b62f2c"} Feb 18 00:53:32 crc kubenswrapper[4791]: I0218 00:53:32.637709 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-868647ff47-kcd8v" event={"ID":"189a189d-5801-4406-8450-1cff37f84bbb","Type":"ContainerStarted","Data":"5251be6c098cfa817cd9c718ef23b3bc7823c65ffe25b9a5375f6f9487f38d31"} Feb 18 00:53:32 crc kubenswrapper[4791]: I0218 00:53:32.639003 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5b9b8895d5-nq4q5" event={"ID":"cd9b7e04-15d6-4e16-9e30-85bbd31605fa","Type":"ContainerStarted","Data":"03f895895bd8bac73ec717087db24d66250eb6e543ab05d1dbd36f8a7d3fd9c1"} Feb 18 00:53:32 crc kubenswrapper[4791]: I0218 00:53:32.643065 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-69f49c598c-dhrvm" event={"ID":"2f773ffa-c1bb-4096-b44d-b01e7d9158c3","Type":"ContainerStarted","Data":"2964ebc18690a592982410cf538e374e9c3d3f4a5dbae3330e52c0e5abe67d52"} Feb 18 00:53:32 crc kubenswrapper[4791]: I0218 00:53:32.985931 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-68f46476f-5tvb7"] Feb 18 00:53:32 crc kubenswrapper[4791]: I0218 00:53:32.998739 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-54f6768c69-mw585"] Feb 18 00:53:33 crc kubenswrapper[4791]: I0218 00:53:33.012930 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-567668f5cf-2klv9"] Feb 18 00:53:33 crc kubenswrapper[4791]: I0218 00:53:33.026908 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-6994f66f48-9sxdc"] Feb 18 00:53:33 crc kubenswrapper[4791]: W0218 00:53:33.032945 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf1db1f83_f5b1_4c8d_af64_92816b1aa96d.slice/crio-95fa6520f597e0d2f5bd4f478537e644db411d6820fbcaa709783f89f40ec1cd WatchSource:0}: Error finding container 95fa6520f597e0d2f5bd4f478537e644db411d6820fbcaa709783f89f40ec1cd: Status 404 returned error can't find the container with id 95fa6520f597e0d2f5bd4f478537e644db411d6820fbcaa709783f89f40ec1cd Feb 18 00:53:33 crc kubenswrapper[4791]: W0218 00:53:33.033219 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda3e4d5ea_ce2c_4d6b_93bd_f67a88f996a6.slice/crio-3c3c521629b03f756be4ac60527c73d42bb238672b20c739ad3734d92c457ac7 WatchSource:0}: Error finding container 3c3c521629b03f756be4ac60527c73d42bb238672b20c739ad3734d92c457ac7: Status 404 returned error can't find the container with id 3c3c521629b03f756be4ac60527c73d42bb238672b20c739ad3734d92c457ac7 Feb 18 00:53:33 crc kubenswrapper[4791]: W0218 00:53:33.039540 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7ebed3cd_6f5f_4d8b_9b8c_d857c9ca0177.slice/crio-e4dbdc670ef68b8e05b015b0abf7a7d66c59f71ca8d1818b079f0a1cc26b2319 WatchSource:0}: Error finding container e4dbdc670ef68b8e05b015b0abf7a7d66c59f71ca8d1818b079f0a1cc26b2319: Status 404 returned error can't find the container with id 
e4dbdc670ef68b8e05b015b0abf7a7d66c59f71ca8d1818b079f0a1cc26b2319 Feb 18 00:53:33 crc kubenswrapper[4791]: I0218 00:53:33.040406 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-c6f9cb8b-mnkq9"] Feb 18 00:53:33 crc kubenswrapper[4791]: I0218 00:53:33.049638 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-d44cf6b75-97ckn"] Feb 18 00:53:33 crc kubenswrapper[4791]: W0218 00:53:33.114150 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c9de6b9_8cd5_4082_b89e_88958f7cb27e.slice/crio-dc8a0779104e9fe11513d88408bb26df409aa33ad5a97b3ee151fb414115130f WatchSource:0}: Error finding container dc8a0779104e9fe11513d88408bb26df409aa33ad5a97b3ee151fb414115130f: Status 404 returned error can't find the container with id dc8a0779104e9fe11513d88408bb26df409aa33ad5a97b3ee151fb414115130f Feb 18 00:53:33 crc kubenswrapper[4791]: W0218 00:53:33.119754 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaf444709_16b7_4e86_bf27_b0f4bcbd07d6.slice/crio-634f37c1460d7e3a33722f05dbb1b550b33a13e658557436fd276c3928dfa644 WatchSource:0}: Error finding container 634f37c1460d7e3a33722f05dbb1b550b33a13e658557436fd276c3928dfa644: Status 404 returned error can't find the container with id 634f37c1460d7e3a33722f05dbb1b550b33a13e658557436fd276c3928dfa644 Feb 18 00:53:33 crc kubenswrapper[4791]: W0218 00:53:33.122069 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod35eeab77_8059_4f0c_8742_3f72c2ffab54.slice/crio-1d9ef23b622dad875d3c9c25b93ae109ef15eeb7b061bef376653c416f423683 WatchSource:0}: Error finding container 1d9ef23b622dad875d3c9c25b93ae109ef15eeb7b061bef376653c416f423683: Status 404 returned error can't find the container with id 1d9ef23b622dad875d3c9c25b93ae109ef15eeb7b061bef376653c416f423683 Feb 18 00:53:33 crc kubenswrapper[4791]: I0218 00:53:33.139340 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-8497b45c89-47vp7"] Feb 18 00:53:33 crc kubenswrapper[4791]: I0218 00:53:33.139374 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-69f8888797-gz2td"] Feb 18 00:53:33 crc kubenswrapper[4791]: W0218 00:53:33.156686 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbe30d6a9_7cd4_482d_a12d_e21de55366c1.slice/crio-bcfec5705916ec2f62505ebe9ec58f73079278bbeb4c77f6f14cd29172544452 WatchSource:0}: Error finding container bcfec5705916ec2f62505ebe9ec58f73079278bbeb4c77f6f14cd29172544452: Status 404 returned error can't find the container with id bcfec5705916ec2f62505ebe9ec58f73079278bbeb4c77f6f14cd29172544452 Feb 18 00:53:33 crc kubenswrapper[4791]: I0218 00:53:33.204887 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/be246d6d-b866-4edb-bb80-3c84e27f0caa-cert\") pod \"openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp\" (UID: \"be246d6d-b866-4edb-bb80-3c84e27f0caa\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp" Feb 18 00:53:33 crc kubenswrapper[4791]: E0218 00:53:33.205091 4791 secret.go:188] Couldn't get 
secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 18 00:53:33 crc kubenswrapper[4791]: E0218 00:53:33.205199 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/be246d6d-b866-4edb-bb80-3c84e27f0caa-cert podName:be246d6d-b866-4edb-bb80-3c84e27f0caa nodeName:}" failed. No retries permitted until 2026-02-18 00:53:35.205175425 +0000 UTC m=+1156.773188665 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/be246d6d-b866-4edb-bb80-3c84e27f0caa-cert") pod "openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp" (UID: "be246d6d-b866-4edb-bb80-3c84e27f0caa") : secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 18 00:53:33 crc kubenswrapper[4791]: I0218 00:53:33.327053 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-7866795846-bx4hv"] Feb 18 00:53:33 crc kubenswrapper[4791]: W0218 00:53:33.343175 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod819eaf55_1011_4861_bca0_7aecb14098c3.slice/crio-2cf5b066e9a847cc4557ce2f06f303396450058fe5b59c8d435bcd0486c6f0d9 WatchSource:0}: Error finding container 2cf5b066e9a847cc4557ce2f06f303396450058fe5b59c8d435bcd0486c6f0d9: Status 404 returned error can't find the container with id 2cf5b066e9a847cc4557ce2f06f303396450058fe5b59c8d435bcd0486c6f0d9 Feb 18 00:53:33 crc kubenswrapper[4791]: I0218 00:53:33.357842 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-5db88f68c-sjggr"] Feb 18 00:53:33 crc kubenswrapper[4791]: W0218 00:53:33.381470 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod38f179b0_7f33_4d39_aa94_2ca4292ac94e.slice/crio-f5ef459ff70ef914473a54d342937aebc6f86a0a799f316275abdd1134ef2359 WatchSource:0}: Error finding container f5ef459ff70ef914473a54d342937aebc6f86a0a799f316275abdd1134ef2359: Status 404 returned error can't find the container with id f5ef459ff70ef914473a54d342937aebc6f86a0a799f316275abdd1134ef2359 Feb 18 00:53:33 crc kubenswrapper[4791]: E0218 00:53:33.387555 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:d01ae848290e880c09127d5297418dea40fc7f090fdab9bf2c578c7e7f53aec0,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-f6kr5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 
8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-5db88f68c-sjggr_openstack-operators(38f179b0-7f33-4d39-aa94-2ca4292ac94e): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Feb 18 00:53:33 crc kubenswrapper[4791]: E0218 00:53:33.388814 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/watcher-operator-controller-manager-5db88f68c-sjggr" podUID="38f179b0-7f33-4d39-aa94-2ca4292ac94e" Feb 18 00:53:33 crc kubenswrapper[4791]: I0218 00:53:33.399456 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wl9hf"] Feb 18 00:53:33 crc kubenswrapper[4791]: I0218 00:53:33.407736 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-metrics-certs\") pod \"openstack-operator-controller-manager-669759659c-xt6gk\" (UID: \"0d602aa9-6246-4b05-8a6c-7d3cfb607a36\") " pod="openstack-operators/openstack-operator-controller-manager-669759659c-xt6gk" Feb 18 00:53:33 crc kubenswrapper[4791]: I0218 00:53:33.407788 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-webhook-certs\") pod \"openstack-operator-controller-manager-669759659c-xt6gk\" (UID: \"0d602aa9-6246-4b05-8a6c-7d3cfb607a36\") " pod="openstack-operators/openstack-operator-controller-manager-669759659c-xt6gk" Feb 18 00:53:33 crc kubenswrapper[4791]: E0218 00:53:33.407955 4791 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Feb 18 00:53:33 crc kubenswrapper[4791]: E0218 00:53:33.408015 4791 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Feb 18 00:53:33 crc kubenswrapper[4791]: E0218 00:53:33.408040 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-metrics-certs podName:0d602aa9-6246-4b05-8a6c-7d3cfb607a36 nodeName:}" failed. No retries permitted until 2026-02-18 00:53:35.408001508 +0000 UTC m=+1156.976014678 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-metrics-certs") pod "openstack-operator-controller-manager-669759659c-xt6gk" (UID: "0d602aa9-6246-4b05-8a6c-7d3cfb607a36") : secret "metrics-server-cert" not found Feb 18 00:53:33 crc kubenswrapper[4791]: E0218 00:53:33.408065 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-webhook-certs podName:0d602aa9-6246-4b05-8a6c-7d3cfb607a36 nodeName:}" failed. No retries permitted until 2026-02-18 00:53:35.408051369 +0000 UTC m=+1156.976064529 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-webhook-certs") pod "openstack-operator-controller-manager-669759659c-xt6gk" (UID: "0d602aa9-6246-4b05-8a6c-7d3cfb607a36") : secret "webhook-server-cert" not found Feb 18 00:53:33 crc kubenswrapper[4791]: E0218 00:53:33.413014 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ghnbb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-wl9hf_openstack-operators(692a8f45-9351-4d64-9571-20f46a3bd0ba): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Feb 18 00:53:33 crc kubenswrapper[4791]: E0218 00:53:33.414717 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wl9hf" podUID="692a8f45-9351-4d64-9571-20f46a3bd0ba" Feb 18 00:53:33 crc kubenswrapper[4791]: I0218 00:53:33.652734 4791 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-8497b45c89-47vp7" event={"ID":"35eeab77-8059-4f0c-8742-3f72c2ffab54","Type":"ContainerStarted","Data":"1d9ef23b622dad875d3c9c25b93ae109ef15eeb7b061bef376653c416f423683"} Feb 18 00:53:33 crc kubenswrapper[4791]: I0218 00:53:33.654326 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-6994f66f48-9sxdc" event={"ID":"af444709-16b7-4e86-bf27-b0f4bcbd07d6","Type":"ContainerStarted","Data":"634f37c1460d7e3a33722f05dbb1b550b33a13e658557436fd276c3928dfa644"} Feb 18 00:53:33 crc kubenswrapper[4791]: I0218 00:53:33.656645 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-54f6768c69-mw585" event={"ID":"7ebed3cd-6f5f-4d8b-9b8c-d857c9ca0177","Type":"ContainerStarted","Data":"e4dbdc670ef68b8e05b015b0abf7a7d66c59f71ca8d1818b079f0a1cc26b2319"} Feb 18 00:53:33 crc kubenswrapper[4791]: I0218 00:53:33.658565 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-567668f5cf-2klv9" event={"ID":"f1db1f83-f5b1-4c8d-af64-92816b1aa96d","Type":"ContainerStarted","Data":"95fa6520f597e0d2f5bd4f478537e644db411d6820fbcaa709783f89f40ec1cd"} Feb 18 00:53:33 crc kubenswrapper[4791]: I0218 00:53:33.661437 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-d44cf6b75-97ckn" event={"ID":"2c9de6b9-8cd5-4082-b89e-88958f7cb27e","Type":"ContainerStarted","Data":"dc8a0779104e9fe11513d88408bb26df409aa33ad5a97b3ee151fb414115130f"} Feb 18 00:53:33 crc kubenswrapper[4791]: I0218 00:53:33.662983 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-69f8888797-gz2td" event={"ID":"be30d6a9-7cd4-482d-a12d-e21de55366c1","Type":"ContainerStarted","Data":"bcfec5705916ec2f62505ebe9ec58f73079278bbeb4c77f6f14cd29172544452"} Feb 18 00:53:33 crc kubenswrapper[4791]: I0218 00:53:33.664388 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-7866795846-bx4hv" event={"ID":"819eaf55-1011-4861-bca0-7aecb14098c3","Type":"ContainerStarted","Data":"2cf5b066e9a847cc4557ce2f06f303396450058fe5b59c8d435bcd0486c6f0d9"} Feb 18 00:53:33 crc kubenswrapper[4791]: I0218 00:53:33.669735 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-5db88f68c-sjggr" event={"ID":"38f179b0-7f33-4d39-aa94-2ca4292ac94e","Type":"ContainerStarted","Data":"f5ef459ff70ef914473a54d342937aebc6f86a0a799f316275abdd1134ef2359"} Feb 18 00:53:33 crc kubenswrapper[4791]: E0218 00:53:33.672556 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:d01ae848290e880c09127d5297418dea40fc7f090fdab9bf2c578c7e7f53aec0\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-5db88f68c-sjggr" podUID="38f179b0-7f33-4d39-aa94-2ca4292ac94e" Feb 18 00:53:33 crc kubenswrapper[4791]: I0218 00:53:33.672706 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-c6f9cb8b-mnkq9" event={"ID":"ea2ae9de-8373-4a24-bbe8-2308ecc8dad2","Type":"ContainerStarted","Data":"a95258ffe5097dcca139c00522ec0a518b56f2102e1e504cbae211f624a73f1d"} Feb 18 00:53:33 crc 
kubenswrapper[4791]: I0218 00:53:33.676228 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-68f46476f-5tvb7" event={"ID":"a3e4d5ea-ce2c-4d6b-93bd-f67a88f996a6","Type":"ContainerStarted","Data":"3c3c521629b03f756be4ac60527c73d42bb238672b20c739ad3734d92c457ac7"} Feb 18 00:53:33 crc kubenswrapper[4791]: I0218 00:53:33.677901 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wl9hf" event={"ID":"692a8f45-9351-4d64-9571-20f46a3bd0ba","Type":"ContainerStarted","Data":"be0d7ccdabbef21326ca21f88359dd49bd00b4f7fd33c6f1b08346388aa8d737"} Feb 18 00:53:33 crc kubenswrapper[4791]: E0218 00:53:33.680784 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wl9hf" podUID="692a8f45-9351-4d64-9571-20f46a3bd0ba" Feb 18 00:53:34 crc kubenswrapper[4791]: I0218 00:53:34.535266 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c0f7fb62-ebcb-4989-8913-8b4b488df740-cert\") pod \"infra-operator-controller-manager-79d975b745-4xrps\" (UID: \"c0f7fb62-ebcb-4989-8913-8b4b488df740\") " pod="openstack-operators/infra-operator-controller-manager-79d975b745-4xrps" Feb 18 00:53:34 crc kubenswrapper[4791]: E0218 00:53:34.535477 4791 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Feb 18 00:53:34 crc kubenswrapper[4791]: E0218 00:53:34.535527 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c0f7fb62-ebcb-4989-8913-8b4b488df740-cert podName:c0f7fb62-ebcb-4989-8913-8b4b488df740 nodeName:}" failed. No retries permitted until 2026-02-18 00:53:38.535511649 +0000 UTC m=+1160.103524819 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/c0f7fb62-ebcb-4989-8913-8b4b488df740-cert") pod "infra-operator-controller-manager-79d975b745-4xrps" (UID: "c0f7fb62-ebcb-4989-8913-8b4b488df740") : secret "infra-operator-webhook-server-cert" not found Feb 18 00:53:34 crc kubenswrapper[4791]: E0218 00:53:34.698260 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:d01ae848290e880c09127d5297418dea40fc7f090fdab9bf2c578c7e7f53aec0\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-5db88f68c-sjggr" podUID="38f179b0-7f33-4d39-aa94-2ca4292ac94e" Feb 18 00:53:34 crc kubenswrapper[4791]: E0218 00:53:34.701109 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wl9hf" podUID="692a8f45-9351-4d64-9571-20f46a3bd0ba" Feb 18 00:53:35 crc kubenswrapper[4791]: I0218 00:53:35.264464 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/be246d6d-b866-4edb-bb80-3c84e27f0caa-cert\") pod \"openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp\" (UID: \"be246d6d-b866-4edb-bb80-3c84e27f0caa\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp" Feb 18 00:53:35 crc kubenswrapper[4791]: E0218 00:53:35.264684 4791 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 18 00:53:35 crc kubenswrapper[4791]: E0218 00:53:35.264758 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/be246d6d-b866-4edb-bb80-3c84e27f0caa-cert podName:be246d6d-b866-4edb-bb80-3c84e27f0caa nodeName:}" failed. No retries permitted until 2026-02-18 00:53:39.264736828 +0000 UTC m=+1160.832749998 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/be246d6d-b866-4edb-bb80-3c84e27f0caa-cert") pod "openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp" (UID: "be246d6d-b866-4edb-bb80-3c84e27f0caa") : secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 18 00:53:35 crc kubenswrapper[4791]: I0218 00:53:35.472018 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-metrics-certs\") pod \"openstack-operator-controller-manager-669759659c-xt6gk\" (UID: \"0d602aa9-6246-4b05-8a6c-7d3cfb607a36\") " pod="openstack-operators/openstack-operator-controller-manager-669759659c-xt6gk" Feb 18 00:53:35 crc kubenswrapper[4791]: I0218 00:53:35.472134 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-webhook-certs\") pod \"openstack-operator-controller-manager-669759659c-xt6gk\" (UID: \"0d602aa9-6246-4b05-8a6c-7d3cfb607a36\") " pod="openstack-operators/openstack-operator-controller-manager-669759659c-xt6gk" Feb 18 00:53:35 crc kubenswrapper[4791]: E0218 00:53:35.472275 4791 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Feb 18 00:53:35 crc kubenswrapper[4791]: E0218 00:53:35.472360 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-metrics-certs podName:0d602aa9-6246-4b05-8a6c-7d3cfb607a36 nodeName:}" failed. No retries permitted until 2026-02-18 00:53:39.472342049 +0000 UTC m=+1161.040355219 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-metrics-certs") pod "openstack-operator-controller-manager-669759659c-xt6gk" (UID: "0d602aa9-6246-4b05-8a6c-7d3cfb607a36") : secret "metrics-server-cert" not found Feb 18 00:53:35 crc kubenswrapper[4791]: E0218 00:53:35.472366 4791 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Feb 18 00:53:35 crc kubenswrapper[4791]: E0218 00:53:35.472443 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-webhook-certs podName:0d602aa9-6246-4b05-8a6c-7d3cfb607a36 nodeName:}" failed. No retries permitted until 2026-02-18 00:53:39.472423531 +0000 UTC m=+1161.040436761 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-webhook-certs") pod "openstack-operator-controller-manager-669759659c-xt6gk" (UID: "0d602aa9-6246-4b05-8a6c-7d3cfb607a36") : secret "webhook-server-cert" not found Feb 18 00:53:38 crc kubenswrapper[4791]: I0218 00:53:38.627294 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c0f7fb62-ebcb-4989-8913-8b4b488df740-cert\") pod \"infra-operator-controller-manager-79d975b745-4xrps\" (UID: \"c0f7fb62-ebcb-4989-8913-8b4b488df740\") " pod="openstack-operators/infra-operator-controller-manager-79d975b745-4xrps" Feb 18 00:53:38 crc kubenswrapper[4791]: E0218 00:53:38.627483 4791 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Feb 18 00:53:38 crc kubenswrapper[4791]: E0218 00:53:38.627780 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c0f7fb62-ebcb-4989-8913-8b4b488df740-cert podName:c0f7fb62-ebcb-4989-8913-8b4b488df740 nodeName:}" failed. No retries permitted until 2026-02-18 00:53:46.62776109 +0000 UTC m=+1168.195774260 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/c0f7fb62-ebcb-4989-8913-8b4b488df740-cert") pod "infra-operator-controller-manager-79d975b745-4xrps" (UID: "c0f7fb62-ebcb-4989-8913-8b4b488df740") : secret "infra-operator-webhook-server-cert" not found Feb 18 00:53:39 crc kubenswrapper[4791]: I0218 00:53:39.342869 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/be246d6d-b866-4edb-bb80-3c84e27f0caa-cert\") pod \"openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp\" (UID: \"be246d6d-b866-4edb-bb80-3c84e27f0caa\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp" Feb 18 00:53:39 crc kubenswrapper[4791]: E0218 00:53:39.343055 4791 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 18 00:53:39 crc kubenswrapper[4791]: E0218 00:53:39.343182 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/be246d6d-b866-4edb-bb80-3c84e27f0caa-cert podName:be246d6d-b866-4edb-bb80-3c84e27f0caa nodeName:}" failed. No retries permitted until 2026-02-18 00:53:47.343121894 +0000 UTC m=+1168.911135064 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/be246d6d-b866-4edb-bb80-3c84e27f0caa-cert") pod "openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp" (UID: "be246d6d-b866-4edb-bb80-3c84e27f0caa") : secret "openstack-baremetal-operator-webhook-server-cert" not found Feb 18 00:53:39 crc kubenswrapper[4791]: I0218 00:53:39.545695 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-metrics-certs\") pod \"openstack-operator-controller-manager-669759659c-xt6gk\" (UID: \"0d602aa9-6246-4b05-8a6c-7d3cfb607a36\") " pod="openstack-operators/openstack-operator-controller-manager-669759659c-xt6gk" Feb 18 00:53:39 crc kubenswrapper[4791]: I0218 00:53:39.545769 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-webhook-certs\") pod \"openstack-operator-controller-manager-669759659c-xt6gk\" (UID: \"0d602aa9-6246-4b05-8a6c-7d3cfb607a36\") " pod="openstack-operators/openstack-operator-controller-manager-669759659c-xt6gk" Feb 18 00:53:39 crc kubenswrapper[4791]: E0218 00:53:39.545924 4791 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Feb 18 00:53:39 crc kubenswrapper[4791]: E0218 00:53:39.545957 4791 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Feb 18 00:53:39 crc kubenswrapper[4791]: E0218 00:53:39.545977 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-webhook-certs podName:0d602aa9-6246-4b05-8a6c-7d3cfb607a36 nodeName:}" failed. No retries permitted until 2026-02-18 00:53:47.545964018 +0000 UTC m=+1169.113977188 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-webhook-certs") pod "openstack-operator-controller-manager-669759659c-xt6gk" (UID: "0d602aa9-6246-4b05-8a6c-7d3cfb607a36") : secret "webhook-server-cert" not found Feb 18 00:53:39 crc kubenswrapper[4791]: E0218 00:53:39.546017 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-metrics-certs podName:0d602aa9-6246-4b05-8a6c-7d3cfb607a36 nodeName:}" failed. No retries permitted until 2026-02-18 00:53:47.545999199 +0000 UTC m=+1169.114012359 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-metrics-certs") pod "openstack-operator-controller-manager-669759659c-xt6gk" (UID: "0d602aa9-6246-4b05-8a6c-7d3cfb607a36") : secret "metrics-server-cert" not found Feb 18 00:53:45 crc kubenswrapper[4791]: E0218 00:53:45.700250 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/placement-operator@sha256:a57336b9f95b703f80453db87e43a2834ca1bdc89480796d28ebbe0a9702ecfd" Feb 18 00:53:45 crc kubenswrapper[4791]: E0218 00:53:45.700870 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:a57336b9f95b703f80453db87e43a2834ca1bdc89480796d28ebbe0a9702ecfd,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2zvfw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-8497b45c89-47vp7_openstack-operators(35eeab77-8059-4f0c-8742-3f72c2ffab54): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 18 00:53:45 crc kubenswrapper[4791]: E0218 00:53:45.702125 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" 
pod="openstack-operators/placement-operator-controller-manager-8497b45c89-47vp7" podUID="35eeab77-8059-4f0c-8742-3f72c2ffab54" Feb 18 00:53:46 crc kubenswrapper[4791]: E0218 00:53:46.372827 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:a57336b9f95b703f80453db87e43a2834ca1bdc89480796d28ebbe0a9702ecfd\\\"\"" pod="openstack-operators/placement-operator-controller-manager-8497b45c89-47vp7" podUID="35eeab77-8059-4f0c-8742-3f72c2ffab54" Feb 18 00:53:46 crc kubenswrapper[4791]: I0218 00:53:46.679100 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c0f7fb62-ebcb-4989-8913-8b4b488df740-cert\") pod \"infra-operator-controller-manager-79d975b745-4xrps\" (UID: \"c0f7fb62-ebcb-4989-8913-8b4b488df740\") " pod="openstack-operators/infra-operator-controller-manager-79d975b745-4xrps" Feb 18 00:53:46 crc kubenswrapper[4791]: I0218 00:53:46.684784 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c0f7fb62-ebcb-4989-8913-8b4b488df740-cert\") pod \"infra-operator-controller-manager-79d975b745-4xrps\" (UID: \"c0f7fb62-ebcb-4989-8913-8b4b488df740\") " pod="openstack-operators/infra-operator-controller-manager-79d975b745-4xrps" Feb 18 00:53:46 crc kubenswrapper[4791]: I0218 00:53:46.754804 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-79d975b745-4xrps" Feb 18 00:53:47 crc kubenswrapper[4791]: E0218 00:53:47.014770 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/octavia-operator@sha256:229fc8c8d94dd4102d2151cd4ec1eaaa09d897c2b396d06e903f61ea29c1fa34" Feb 18 00:53:47 crc kubenswrapper[4791]: E0218 00:53:47.015325 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:229fc8c8d94dd4102d2151cd4ec1eaaa09d897c2b396d06e903f61ea29c1fa34,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-trl5f,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-69f8888797-gz2td_openstack-operators(be30d6a9-7cd4-482d-a12d-e21de55366c1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 18 00:53:47 crc kubenswrapper[4791]: E0218 00:53:47.017092 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/octavia-operator-controller-manager-69f8888797-gz2td" podUID="be30d6a9-7cd4-482d-a12d-e21de55366c1" Feb 18 00:53:47 crc kubenswrapper[4791]: I0218 00:53:47.393568 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/be246d6d-b866-4edb-bb80-3c84e27f0caa-cert\") pod \"openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp\" (UID: \"be246d6d-b866-4edb-bb80-3c84e27f0caa\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp" Feb 18 00:53:47 crc kubenswrapper[4791]: I0218 00:53:47.414500 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/be246d6d-b866-4edb-bb80-3c84e27f0caa-cert\") pod \"openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp\" (UID: \"be246d6d-b866-4edb-bb80-3c84e27f0caa\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp" Feb 18 00:53:47 crc kubenswrapper[4791]: I0218 00:53:47.601728 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-metrics-certs\") pod \"openstack-operator-controller-manager-669759659c-xt6gk\" (UID: \"0d602aa9-6246-4b05-8a6c-7d3cfb607a36\") " pod="openstack-operators/openstack-operator-controller-manager-669759659c-xt6gk" Feb 18 00:53:47 crc kubenswrapper[4791]: I0218 00:53:47.601783 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-webhook-certs\") pod \"openstack-operator-controller-manager-669759659c-xt6gk\" (UID: \"0d602aa9-6246-4b05-8a6c-7d3cfb607a36\") " pod="openstack-operators/openstack-operator-controller-manager-669759659c-xt6gk" Feb 18 00:53:47 crc kubenswrapper[4791]: I0218 00:53:47.605327 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-webhook-certs\") pod \"openstack-operator-controller-manager-669759659c-xt6gk\" (UID: \"0d602aa9-6246-4b05-8a6c-7d3cfb607a36\") " 
pod="openstack-operators/openstack-operator-controller-manager-669759659c-xt6gk" Feb 18 00:53:47 crc kubenswrapper[4791]: I0218 00:53:47.613553 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0d602aa9-6246-4b05-8a6c-7d3cfb607a36-metrics-certs\") pod \"openstack-operator-controller-manager-669759659c-xt6gk\" (UID: \"0d602aa9-6246-4b05-8a6c-7d3cfb607a36\") " pod="openstack-operators/openstack-operator-controller-manager-669759659c-xt6gk" Feb 18 00:53:47 crc kubenswrapper[4791]: I0218 00:53:47.616335 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-669759659c-xt6gk" Feb 18 00:53:47 crc kubenswrapper[4791]: I0218 00:53:47.680885 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp" Feb 18 00:53:47 crc kubenswrapper[4791]: E0218 00:53:47.819760 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:229fc8c8d94dd4102d2151cd4ec1eaaa09d897c2b396d06e903f61ea29c1fa34\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-69f8888797-gz2td" podUID="be30d6a9-7cd4-482d-a12d-e21de55366c1" Feb 18 00:53:49 crc kubenswrapper[4791]: E0218 00:53:49.437215 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ovn-operator@sha256:543c103838f3e6ef48755665a7695dfa3ed84753c557560257d265db31f92759" Feb 18 00:53:49 crc kubenswrapper[4791]: E0218 00:53:49.437758 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:543c103838f3e6ef48755665a7695dfa3ed84753c557560257d265db31f92759,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wqqvr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-d44cf6b75-97ckn_openstack-operators(2c9de6b9-8cd5-4082-b89e-88958f7cb27e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 18 00:53:49 crc kubenswrapper[4791]: E0218 00:53:49.439023 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ovn-operator-controller-manager-d44cf6b75-97ckn" podUID="2c9de6b9-8cd5-4082-b89e-88958f7cb27e" Feb 18 00:53:49 crc kubenswrapper[4791]: E0218 00:53:49.842260 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:543c103838f3e6ef48755665a7695dfa3ed84753c557560257d265db31f92759\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-d44cf6b75-97ckn" podUID="2c9de6b9-8cd5-4082-b89e-88958f7cb27e" Feb 18 00:53:49 crc kubenswrapper[4791]: E0218 00:53:49.960833 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ironic-operator@sha256:7e1b0b7b172ad0d707ab80dd72d609e1d0f5bbd38a22c24a28ed0f17a960c867" Feb 18 00:53:49 crc kubenswrapper[4791]: E0218 00:53:49.961091 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ironic-operator@sha256:7e1b0b7b172ad0d707ab80dd72d609e1d0f5bbd38a22c24a28ed0f17a960c867,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-mgzks,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-554564d7fc-265gr_openstack-operators(e029392b-011d-4edb-84bf-851fb6e9828f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 18 00:53:49 crc kubenswrapper[4791]: E0218 00:53:49.962450 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ironic-operator-controller-manager-554564d7fc-265gr" podUID="e029392b-011d-4edb-84bf-851fb6e9828f" Feb 18 00:53:50 crc kubenswrapper[4791]: E0218 00:53:50.851015 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ironic-operator@sha256:7e1b0b7b172ad0d707ab80dd72d609e1d0f5bbd38a22c24a28ed0f17a960c867\\\"\"" pod="openstack-operators/ironic-operator-controller-manager-554564d7fc-265gr" podUID="e029392b-011d-4edb-84bf-851fb6e9828f" Feb 18 00:53:51 crc kubenswrapper[4791]: E0218 00:53:51.459529 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/swift-operator@sha256:3d676f1281e24ef07de617570d2f7fbf625032e41866d1551a856c052248bb04" Feb 18 00:53:51 crc kubenswrapper[4791]: E0218 00:53:51.459734 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:3d676f1281e24ef07de617570d2f7fbf625032e41866d1551a856c052248bb04,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-vsdtp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-68f46476f-5tvb7_openstack-operators(a3e4d5ea-ce2c-4d6b-93bd-f67a88f996a6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 18 00:53:51 crc kubenswrapper[4791]: E0218 00:53:51.461515 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/swift-operator-controller-manager-68f46476f-5tvb7" podUID="a3e4d5ea-ce2c-4d6b-93bd-f67a88f996a6" Feb 18 00:53:51 crc kubenswrapper[4791]: E0218 00:53:51.856388 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:3d676f1281e24ef07de617570d2f7fbf625032e41866d1551a856c052248bb04\\\"\"" pod="openstack-operators/swift-operator-controller-manager-68f46476f-5tvb7" podUID="a3e4d5ea-ce2c-4d6b-93bd-f67a88f996a6" Feb 18 00:53:51 crc kubenswrapper[4791]: E0218 00:53:51.861722 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/manila-operator@sha256:8fb0a33b8d93cf9f84f079af5f2ceb680afada4e44542514959146779f57f64c" Feb 18 00:53:51 crc kubenswrapper[4791]: E0218 00:53:51.861890 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:8fb0a33b8d93cf9f84f079af5f2ceb680afada4e44542514959146779f57f64c,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-hw59n,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-54f6768c69-mw585_openstack-operators(7ebed3cd-6f5f-4d8b-9b8c-d857c9ca0177): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 18 00:53:51 crc kubenswrapper[4791]: E0218 00:53:51.863051 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/manila-operator-controller-manager-54f6768c69-mw585" podUID="7ebed3cd-6f5f-4d8b-9b8c-d857c9ca0177" Feb 18 00:53:52 crc kubenswrapper[4791]: E0218 00:53:52.863839 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:8fb0a33b8d93cf9f84f079af5f2ceb680afada4e44542514959146779f57f64c\\\"\"" pod="openstack-operators/manila-operator-controller-manager-54f6768c69-mw585" podUID="7ebed3cd-6f5f-4d8b-9b8c-d857c9ca0177" Feb 18 00:53:53 crc kubenswrapper[4791]: E0218 00:53:53.226726 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/designate-operator@sha256:c1e33e962043cd6e3d09ebd225cb72781451dba7af2d57522e5c6eedbdc91642" Feb 18 00:53:53 crc kubenswrapper[4791]: E0218 00:53:53.226962 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:quay.io/openstack-k8s-operators/designate-operator@sha256:c1e33e962043cd6e3d09ebd225cb72781451dba7af2d57522e5c6eedbdc91642,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5gxfs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod designate-operator-controller-manager-6d8bf5c495-nzmq6_openstack-operators(e8318b6f-bf0c-447f-a43c-0ac54f9c60a4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 18 00:53:53 crc kubenswrapper[4791]: E0218 00:53:53.228269 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/designate-operator-controller-manager-6d8bf5c495-nzmq6" podUID="e8318b6f-bf0c-447f-a43c-0ac54f9c60a4" Feb 18 00:53:53 crc kubenswrapper[4791]: E0218 00:53:53.738512 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/mariadb-operator@sha256:a18f12497b7159b100fcfd72c7ba2273d0669a5c00600a9ff1333bca028f256a" Feb 18 00:53:53 crc kubenswrapper[4791]: E0218 00:53:53.738718 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/mariadb-operator@sha256:a18f12497b7159b100fcfd72c7ba2273d0669a5c00600a9ff1333bca028f256a,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-54zdz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-6994f66f48-9sxdc_openstack-operators(af444709-16b7-4e86-bf27-b0f4bcbd07d6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 18 00:53:53 crc kubenswrapper[4791]: E0218 00:53:53.739912 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/mariadb-operator-controller-manager-6994f66f48-9sxdc" podUID="af444709-16b7-4e86-bf27-b0f4bcbd07d6" Feb 18 00:53:53 crc kubenswrapper[4791]: E0218 00:53:53.811474 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.74:5001/openstack-k8s-operators/telemetry-operator:49fb0a393e644ad55559f09981950c6ee3a56dc1" Feb 18 00:53:53 crc kubenswrapper[4791]: E0218 00:53:53.811533 4791 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.74:5001/openstack-k8s-operators/telemetry-operator:49fb0a393e644ad55559f09981950c6ee3a56dc1" Feb 18 00:53:53 crc kubenswrapper[4791]: E0218 00:53:53.811666 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:38.102.83.74:5001/openstack-k8s-operators/telemetry-operator:49fb0a393e644ad55559f09981950c6ee3a56dc1,Command:[/manager],Args:[--leader-elect 
--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-v8gw4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-c6f9cb8b-mnkq9_openstack-operators(ea2ae9de-8373-4a24-bbe8-2308ecc8dad2): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 18 00:53:53 crc kubenswrapper[4791]: E0218 00:53:53.812906 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/telemetry-operator-controller-manager-c6f9cb8b-mnkq9" podUID="ea2ae9de-8373-4a24-bbe8-2308ecc8dad2" Feb 18 00:53:53 crc kubenswrapper[4791]: E0218 00:53:53.871655 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.74:5001/openstack-k8s-operators/telemetry-operator:49fb0a393e644ad55559f09981950c6ee3a56dc1\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-c6f9cb8b-mnkq9" podUID="ea2ae9de-8373-4a24-bbe8-2308ecc8dad2" Feb 18 00:53:53 crc kubenswrapper[4791]: E0218 00:53:53.871780 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/designate-operator@sha256:c1e33e962043cd6e3d09ebd225cb72781451dba7af2d57522e5c6eedbdc91642\\\"\"" pod="openstack-operators/designate-operator-controller-manager-6d8bf5c495-nzmq6" 
podUID="e8318b6f-bf0c-447f-a43c-0ac54f9c60a4" Feb 18 00:53:53 crc kubenswrapper[4791]: E0218 00:53:53.872383 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/mariadb-operator@sha256:a18f12497b7159b100fcfd72c7ba2273d0669a5c00600a9ff1333bca028f256a\\\"\"" pod="openstack-operators/mariadb-operator-controller-manager-6994f66f48-9sxdc" podUID="af444709-16b7-4e86-bf27-b0f4bcbd07d6" Feb 18 00:53:54 crc kubenswrapper[4791]: E0218 00:53:54.306519 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:c6ad383f55f955902b074d1ee947a2233a5fcbf40698479ae693ce056c80dcc1" Feb 18 00:53:54 crc kubenswrapper[4791]: E0218 00:53:54.307119 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:c6ad383f55f955902b074d1ee947a2233a5fcbf40698479ae693ce056c80dcc1,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9w6zn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-b4d948c87-jqbhr_openstack-operators(4e11abdf-c683-4f45-a448-dcdfadbd9731): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 18 00:53:54 crc kubenswrapper[4791]: E0218 00:53:54.308612 4791 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-b4d948c87-jqbhr" podUID="4e11abdf-c683-4f45-a448-dcdfadbd9731" Feb 18 00:53:54 crc kubenswrapper[4791]: E0218 00:53:54.878813 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:c6ad383f55f955902b074d1ee947a2233a5fcbf40698479ae693ce056c80dcc1\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-b4d948c87-jqbhr" podUID="4e11abdf-c683-4f45-a448-dcdfadbd9731" Feb 18 00:53:58 crc kubenswrapper[4791]: E0218 00:53:58.864253 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/nova-operator@sha256:fe85dd595906fac0fe1e7a42215bb306a963cf87d55e07cd2573726b690b2838" Feb 18 00:53:58 crc kubenswrapper[4791]: E0218 00:53:58.864911 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:fe85dd595906fac0fe1e7a42215bb306a963cf87d55e07cd2573726b690b2838,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-499fw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-567668f5cf-2klv9_openstack-operators(f1db1f83-f5b1-4c8d-af64-92816b1aa96d): 
ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 18 00:53:58 crc kubenswrapper[4791]: E0218 00:53:58.867559 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/nova-operator-controller-manager-567668f5cf-2klv9" podUID="f1db1f83-f5b1-4c8d-af64-92816b1aa96d" Feb 18 00:53:58 crc kubenswrapper[4791]: E0218 00:53:58.908276 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:fe85dd595906fac0fe1e7a42215bb306a963cf87d55e07cd2573726b690b2838\\\"\"" pod="openstack-operators/nova-operator-controller-manager-567668f5cf-2klv9" podUID="f1db1f83-f5b1-4c8d-af64-92816b1aa96d" Feb 18 00:53:59 crc kubenswrapper[4791]: I0218 00:53:59.876879 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-79d975b745-4xrps"] Feb 18 00:53:59 crc kubenswrapper[4791]: W0218 00:53:59.883651 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc0f7fb62_ebcb_4989_8913_8b4b488df740.slice/crio-d171daf4b739e555c2ff9d558a73021974cff9a2916b6ed50eacb72f4f8beada WatchSource:0}: Error finding container d171daf4b739e555c2ff9d558a73021974cff9a2916b6ed50eacb72f4f8beada: Status 404 returned error can't find the container with id d171daf4b739e555c2ff9d558a73021974cff9a2916b6ed50eacb72f4f8beada Feb 18 00:53:59 crc kubenswrapper[4791]: I0218 00:53:59.898202 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-669759659c-xt6gk"] Feb 18 00:53:59 crc kubenswrapper[4791]: W0218 00:53:59.915026 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d602aa9_6246_4b05_8a6c_7d3cfb607a36.slice/crio-cf4e3773d29c0fbebf567b611ab101c6090ab3f14d575dd6a9738d17e59e16d5 WatchSource:0}: Error finding container cf4e3773d29c0fbebf567b611ab101c6090ab3f14d575dd6a9738d17e59e16d5: Status 404 returned error can't find the container with id cf4e3773d29c0fbebf567b611ab101c6090ab3f14d575dd6a9738d17e59e16d5 Feb 18 00:53:59 crc kubenswrapper[4791]: I0218 00:53:59.930425 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-79d975b745-4xrps" event={"ID":"c0f7fb62-ebcb-4989-8913-8b4b488df740","Type":"ContainerStarted","Data":"d171daf4b739e555c2ff9d558a73021974cff9a2916b6ed50eacb72f4f8beada"} Feb 18 00:53:59 crc kubenswrapper[4791]: I0218 00:53:59.932814 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-69f49c598c-dhrvm" event={"ID":"2f773ffa-c1bb-4096-b44d-b01e7d9158c3","Type":"ContainerStarted","Data":"d474a794dd46ef9bad9ad12912b1183ebdd13120c5ac015dbaafb8bdd47b2d7c"} Feb 18 00:53:59 crc kubenswrapper[4791]: I0218 00:53:59.932866 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-69f49c598c-dhrvm" Feb 18 00:53:59 crc kubenswrapper[4791]: I0218 00:53:59.959985 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-69f49c598c-dhrvm" 
podStartSLOduration=8.003541318 podStartE2EDuration="30.959968768s" podCreationTimestamp="2026-02-18 00:53:29 +0000 UTC" firstStartedPulling="2026-02-18 00:53:32.297250193 +0000 UTC m=+1153.865263363" lastFinishedPulling="2026-02-18 00:53:55.253677643 +0000 UTC m=+1176.821690813" observedRunningTime="2026-02-18 00:53:59.954348663 +0000 UTC m=+1181.522361833" watchObservedRunningTime="2026-02-18 00:53:59.959968768 +0000 UTC m=+1181.527981938" Feb 18 00:53:59 crc kubenswrapper[4791]: I0218 00:53:59.971335 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-77987464f4-k2x8h" event={"ID":"b9ce1944-2b04-437c-9b3e-4cafb6d68ecf","Type":"ContainerStarted","Data":"51a3e11d8bc78aeda618ae217dd3e8867a37de233a3ff7006157eecae9bd7b96"} Feb 18 00:53:59 crc kubenswrapper[4791]: I0218 00:53:59.971516 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-77987464f4-k2x8h" Feb 18 00:53:59 crc kubenswrapper[4791]: I0218 00:53:59.985457 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-64ddbf8bb-9xxkb" event={"ID":"0864bcf0-8b89-4a98-b294-cac0ec858221","Type":"ContainerStarted","Data":"f70f7ffad8c98466bc059f43971e437ccec33f2af784dcd1580c346e6c97a52e"} Feb 18 00:53:59 crc kubenswrapper[4791]: I0218 00:53:59.986177 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-64ddbf8bb-9xxkb" Feb 18 00:53:59 crc kubenswrapper[4791]: I0218 00:53:59.999279 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-77987464f4-k2x8h" podStartSLOduration=8.0666397 podStartE2EDuration="30.999258589s" podCreationTimestamp="2026-02-18 00:53:29 +0000 UTC" firstStartedPulling="2026-02-18 00:53:32.320943881 +0000 UTC m=+1153.888957051" lastFinishedPulling="2026-02-18 00:53:55.25356278 +0000 UTC m=+1176.821575940" observedRunningTime="2026-02-18 00:53:59.998626629 +0000 UTC m=+1181.566639799" watchObservedRunningTime="2026-02-18 00:53:59.999258589 +0000 UTC m=+1181.567271759" Feb 18 00:54:00 crc kubenswrapper[4791]: I0218 00:54:00.002129 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5b9b8895d5-nq4q5" event={"ID":"cd9b7e04-15d6-4e16-9e30-85bbd31605fa","Type":"ContainerStarted","Data":"1fbefb6eeecfe421d67a09856cdada4caed33138444af841489bb616c815d854"} Feb 18 00:54:00 crc kubenswrapper[4791]: I0218 00:54:00.002479 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-5b9b8895d5-nq4q5" Feb 18 00:54:00 crc kubenswrapper[4791]: I0218 00:54:00.012472 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-5d946d989d-pbsxj" event={"ID":"e10a708b-16cd-467f-b166-00429da94123","Type":"ContainerStarted","Data":"330e73b0dbd85f8be8b427ae8e5eb0d148dfd7ff7dea1ca747c3a27b85579890"} Feb 18 00:54:00 crc kubenswrapper[4791]: I0218 00:54:00.013334 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-5d946d989d-pbsxj" Feb 18 00:54:00 crc kubenswrapper[4791]: I0218 00:54:00.021971 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp"] Feb 18 
00:54:00 crc kubenswrapper[4791]: I0218 00:54:00.027471 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-64ddbf8bb-9xxkb" podStartSLOduration=8.11118142 podStartE2EDuration="30.027451826s" podCreationTimestamp="2026-02-18 00:53:30 +0000 UTC" firstStartedPulling="2026-02-18 00:53:32.372592228 +0000 UTC m=+1153.940605398" lastFinishedPulling="2026-02-18 00:53:54.288862634 +0000 UTC m=+1175.856875804" observedRunningTime="2026-02-18 00:54:00.017004621 +0000 UTC m=+1181.585017791" watchObservedRunningTime="2026-02-18 00:54:00.027451826 +0000 UTC m=+1181.595464986" Feb 18 00:54:00 crc kubenswrapper[4791]: I0218 00:54:00.039175 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-5b9b8895d5-nq4q5" podStartSLOduration=8.0703576 podStartE2EDuration="31.03914125s" podCreationTimestamp="2026-02-18 00:53:29 +0000 UTC" firstStartedPulling="2026-02-18 00:53:32.284903204 +0000 UTC m=+1153.852916374" lastFinishedPulling="2026-02-18 00:53:55.253686854 +0000 UTC m=+1176.821700024" observedRunningTime="2026-02-18 00:54:00.03369741 +0000 UTC m=+1181.601710580" watchObservedRunningTime="2026-02-18 00:54:00.03914125 +0000 UTC m=+1181.607154420" Feb 18 00:54:00 crc kubenswrapper[4791]: W0218 00:54:00.041390 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbe246d6d_b866_4edb_bb80_3c84e27f0caa.slice/crio-617516b49db009e40ecca18cf806b3bab8a949d45dfa673885bdda27665cce0d WatchSource:0}: Error finding container 617516b49db009e40ecca18cf806b3bab8a949d45dfa673885bdda27665cce0d: Status 404 returned error can't find the container with id 617516b49db009e40ecca18cf806b3bab8a949d45dfa673885bdda27665cce0d Feb 18 00:54:00 crc kubenswrapper[4791]: I0218 00:54:00.084876 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-5d946d989d-pbsxj" podStartSLOduration=8.452795528 podStartE2EDuration="31.084857861s" podCreationTimestamp="2026-02-18 00:53:29 +0000 UTC" firstStartedPulling="2026-02-18 00:53:31.656801271 +0000 UTC m=+1153.224814431" lastFinishedPulling="2026-02-18 00:53:54.288863594 +0000 UTC m=+1175.856876764" observedRunningTime="2026-02-18 00:54:00.065358775 +0000 UTC m=+1181.633371945" watchObservedRunningTime="2026-02-18 00:54:00.084857861 +0000 UTC m=+1181.652871031" Feb 18 00:54:01 crc kubenswrapper[4791]: I0218 00:54:01.029667 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-69f8888797-gz2td" event={"ID":"be30d6a9-7cd4-482d-a12d-e21de55366c1","Type":"ContainerStarted","Data":"77b88ef4c9625f6b780f6e334b7e5ddc423f7dbe6dc74bfefe1c58f6bed5f3d7"} Feb 18 00:54:01 crc kubenswrapper[4791]: I0218 00:54:01.030228 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-69f8888797-gz2td" Feb 18 00:54:01 crc kubenswrapper[4791]: I0218 00:54:01.037182 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-868647ff47-kcd8v" event={"ID":"189a189d-5801-4406-8450-1cff37f84bbb","Type":"ContainerStarted","Data":"b4660d58fb02fdc2d764d910179b4db3d51b7e65fc36340ddd52032b05dce640"} Feb 18 00:54:01 crc kubenswrapper[4791]: I0218 00:54:01.037443 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/barbican-operator-controller-manager-868647ff47-kcd8v" Feb 18 00:54:01 crc kubenswrapper[4791]: I0218 00:54:01.039918 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-7866795846-bx4hv" event={"ID":"819eaf55-1011-4861-bca0-7aecb14098c3","Type":"ContainerStarted","Data":"7e7196d66a293952f0d0484d0d849fc8e7d42e6b8e41f53f3b1b8b786a2b1c82"} Feb 18 00:54:01 crc kubenswrapper[4791]: I0218 00:54:01.039991 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-7866795846-bx4hv" Feb 18 00:54:01 crc kubenswrapper[4791]: I0218 00:54:01.041445 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp" event={"ID":"be246d6d-b866-4edb-bb80-3c84e27f0caa","Type":"ContainerStarted","Data":"617516b49db009e40ecca18cf806b3bab8a949d45dfa673885bdda27665cce0d"} Feb 18 00:54:01 crc kubenswrapper[4791]: I0218 00:54:01.045184 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-669759659c-xt6gk" event={"ID":"0d602aa9-6246-4b05-8a6c-7d3cfb607a36","Type":"ContainerStarted","Data":"7b723b177ff9a1659a0d90472ffe0c995f8b71a871864e9329fb58a250ffea23"} Feb 18 00:54:01 crc kubenswrapper[4791]: I0218 00:54:01.045212 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-669759659c-xt6gk" event={"ID":"0d602aa9-6246-4b05-8a6c-7d3cfb607a36","Type":"ContainerStarted","Data":"cf4e3773d29c0fbebf567b611ab101c6090ab3f14d575dd6a9738d17e59e16d5"} Feb 18 00:54:01 crc kubenswrapper[4791]: I0218 00:54:01.045414 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-669759659c-xt6gk" Feb 18 00:54:01 crc kubenswrapper[4791]: I0218 00:54:01.052847 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-69f8888797-gz2td" podStartSLOduration=4.693393813 podStartE2EDuration="31.052831479s" podCreationTimestamp="2026-02-18 00:53:30 +0000 UTC" firstStartedPulling="2026-02-18 00:53:33.19267144 +0000 UTC m=+1154.760684610" lastFinishedPulling="2026-02-18 00:53:59.552109106 +0000 UTC m=+1181.120122276" observedRunningTime="2026-02-18 00:54:01.046494201 +0000 UTC m=+1182.614507381" watchObservedRunningTime="2026-02-18 00:54:01.052831479 +0000 UTC m=+1182.620844649" Feb 18 00:54:01 crc kubenswrapper[4791]: I0218 00:54:01.056192 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-8497b45c89-47vp7" event={"ID":"35eeab77-8059-4f0c-8742-3f72c2ffab54","Type":"ContainerStarted","Data":"8d0c472f8b7f7a570436d0a6fdef026f7c449a0f6225f4f4cb04b809ecd978ed"} Feb 18 00:54:01 crc kubenswrapper[4791]: I0218 00:54:01.056427 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-8497b45c89-47vp7" Feb 18 00:54:01 crc kubenswrapper[4791]: I0218 00:54:01.057633 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wl9hf" event={"ID":"692a8f45-9351-4d64-9571-20f46a3bd0ba","Type":"ContainerStarted","Data":"2358fe0a0e750e27d61497523f18190fc34b7f9f79a6408e48aee3eba0fcd4a3"} Feb 18 00:54:01 crc kubenswrapper[4791]: I0218 00:54:01.060187 4791 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-5db88f68c-sjggr" event={"ID":"38f179b0-7f33-4d39-aa94-2ca4292ac94e","Type":"ContainerStarted","Data":"6a51507f30d85b93b7b6f75ea294ed1ea89d2ff9eba144b5cb08760a22b9d6b7"} Feb 18 00:54:01 crc kubenswrapper[4791]: I0218 00:54:01.060626 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-5db88f68c-sjggr" Feb 18 00:54:01 crc kubenswrapper[4791]: I0218 00:54:01.082859 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-669759659c-xt6gk" podStartSLOduration=31.082838391 podStartE2EDuration="31.082838391s" podCreationTimestamp="2026-02-18 00:53:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:54:01.070069095 +0000 UTC m=+1182.638082275" watchObservedRunningTime="2026-02-18 00:54:01.082838391 +0000 UTC m=+1182.650851561" Feb 18 00:54:01 crc kubenswrapper[4791]: I0218 00:54:01.094574 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-868647ff47-kcd8v" podStartSLOduration=8.515627154 podStartE2EDuration="32.094545515s" podCreationTimestamp="2026-02-18 00:53:29 +0000 UTC" firstStartedPulling="2026-02-18 00:53:31.675616619 +0000 UTC m=+1153.243629789" lastFinishedPulling="2026-02-18 00:53:55.25453498 +0000 UTC m=+1176.822548150" observedRunningTime="2026-02-18 00:54:01.090450658 +0000 UTC m=+1182.658463828" watchObservedRunningTime="2026-02-18 00:54:01.094545515 +0000 UTC m=+1182.662558685" Feb 18 00:54:01 crc kubenswrapper[4791]: I0218 00:54:01.130448 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-7866795846-bx4hv" podStartSLOduration=5.043075869 podStartE2EDuration="31.130431471s" podCreationTimestamp="2026-02-18 00:53:30 +0000 UTC" firstStartedPulling="2026-02-18 00:53:33.347294722 +0000 UTC m=+1154.915307892" lastFinishedPulling="2026-02-18 00:53:59.434650324 +0000 UTC m=+1181.002663494" observedRunningTime="2026-02-18 00:54:01.130123201 +0000 UTC m=+1182.698136381" watchObservedRunningTime="2026-02-18 00:54:01.130431471 +0000 UTC m=+1182.698444641" Feb 18 00:54:01 crc kubenswrapper[4791]: I0218 00:54:01.151949 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-5db88f68c-sjggr" podStartSLOduration=5.09630306 podStartE2EDuration="31.151933309s" podCreationTimestamp="2026-02-18 00:53:30 +0000 UTC" firstStartedPulling="2026-02-18 00:53:33.387387315 +0000 UTC m=+1154.955400475" lastFinishedPulling="2026-02-18 00:53:59.443017554 +0000 UTC m=+1181.011030724" observedRunningTime="2026-02-18 00:54:01.145448378 +0000 UTC m=+1182.713461548" watchObservedRunningTime="2026-02-18 00:54:01.151933309 +0000 UTC m=+1182.719946479" Feb 18 00:54:01 crc kubenswrapper[4791]: I0218 00:54:01.172757 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wl9hf" podStartSLOduration=5.02517768 podStartE2EDuration="31.172739817s" podCreationTimestamp="2026-02-18 00:53:30 +0000 UTC" firstStartedPulling="2026-02-18 00:53:33.412909789 +0000 UTC m=+1154.980922959" lastFinishedPulling="2026-02-18 00:53:59.560471926 +0000 UTC 
m=+1181.128485096" observedRunningTime="2026-02-18 00:54:01.161351863 +0000 UTC m=+1182.729365033" watchObservedRunningTime="2026-02-18 00:54:01.172739817 +0000 UTC m=+1182.740752987" Feb 18 00:54:02 crc kubenswrapper[4791]: I0218 00:54:02.075940 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-8497b45c89-47vp7" podStartSLOduration=5.549389961 podStartE2EDuration="32.075923069s" podCreationTimestamp="2026-02-18 00:53:30 +0000 UTC" firstStartedPulling="2026-02-18 00:53:33.130425928 +0000 UTC m=+1154.698439098" lastFinishedPulling="2026-02-18 00:53:59.656959036 +0000 UTC m=+1181.224972206" observedRunningTime="2026-02-18 00:54:01.183901074 +0000 UTC m=+1182.751914244" watchObservedRunningTime="2026-02-18 00:54:02.075923069 +0000 UTC m=+1183.643936239" Feb 18 00:54:04 crc kubenswrapper[4791]: I0218 00:54:04.094512 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-79d975b745-4xrps" event={"ID":"c0f7fb62-ebcb-4989-8913-8b4b488df740","Type":"ContainerStarted","Data":"3d4681942854c7785fa6e72b0a8a299f87bff36392eff9775b2c8e2e973f7176"} Feb 18 00:54:04 crc kubenswrapper[4791]: I0218 00:54:04.095056 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-79d975b745-4xrps" Feb 18 00:54:04 crc kubenswrapper[4791]: I0218 00:54:04.097760 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-554564d7fc-265gr" event={"ID":"e029392b-011d-4edb-84bf-851fb6e9828f","Type":"ContainerStarted","Data":"69b74cd825af7ba4cdcff4d6668468b26514bb02ba7cf6ed26fbceb8b6fe04fd"} Feb 18 00:54:04 crc kubenswrapper[4791]: I0218 00:54:04.098252 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-554564d7fc-265gr" Feb 18 00:54:04 crc kubenswrapper[4791]: I0218 00:54:04.099812 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp" event={"ID":"be246d6d-b866-4edb-bb80-3c84e27f0caa","Type":"ContainerStarted","Data":"cdfda685a900657d97f1956df50fe9713e29071250319e578c74633effc09c6b"} Feb 18 00:54:04 crc kubenswrapper[4791]: I0218 00:54:04.100047 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp" Feb 18 00:54:04 crc kubenswrapper[4791]: I0218 00:54:04.126698 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-79d975b745-4xrps" podStartSLOduration=31.541693405 podStartE2EDuration="35.126683125s" podCreationTimestamp="2026-02-18 00:53:29 +0000 UTC" firstStartedPulling="2026-02-18 00:53:59.886764491 +0000 UTC m=+1181.454777661" lastFinishedPulling="2026-02-18 00:54:03.471754211 +0000 UTC m=+1185.039767381" observedRunningTime="2026-02-18 00:54:04.119085139 +0000 UTC m=+1185.687098309" watchObservedRunningTime="2026-02-18 00:54:04.126683125 +0000 UTC m=+1185.694696295" Feb 18 00:54:04 crc kubenswrapper[4791]: I0218 00:54:04.165316 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp" podStartSLOduration=30.75188141 podStartE2EDuration="34.165298695s" podCreationTimestamp="2026-02-18 00:53:30 +0000 UTC" 
firstStartedPulling="2026-02-18 00:54:00.058335516 +0000 UTC m=+1181.626348686" lastFinishedPulling="2026-02-18 00:54:03.471752801 +0000 UTC m=+1185.039765971" observedRunningTime="2026-02-18 00:54:04.161405174 +0000 UTC m=+1185.729418344" watchObservedRunningTime="2026-02-18 00:54:04.165298695 +0000 UTC m=+1185.733311865" Feb 18 00:54:04 crc kubenswrapper[4791]: I0218 00:54:04.188270 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-554564d7fc-265gr" podStartSLOduration=4.107738 podStartE2EDuration="35.188251559s" podCreationTimestamp="2026-02-18 00:53:29 +0000 UTC" firstStartedPulling="2026-02-18 00:53:32.391270873 +0000 UTC m=+1153.959284043" lastFinishedPulling="2026-02-18 00:54:03.471784432 +0000 UTC m=+1185.039797602" observedRunningTime="2026-02-18 00:54:04.179470596 +0000 UTC m=+1185.747483776" watchObservedRunningTime="2026-02-18 00:54:04.188251559 +0000 UTC m=+1185.756264729" Feb 18 00:54:06 crc kubenswrapper[4791]: I0218 00:54:06.122712 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-d44cf6b75-97ckn" event={"ID":"2c9de6b9-8cd5-4082-b89e-88958f7cb27e","Type":"ContainerStarted","Data":"83d9eee9e187ab98849532033110535a9dcc2feff993c7af7d41b73acfdd558e"} Feb 18 00:54:06 crc kubenswrapper[4791]: I0218 00:54:06.123433 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-d44cf6b75-97ckn" Feb 18 00:54:06 crc kubenswrapper[4791]: I0218 00:54:06.186775 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-d44cf6b75-97ckn" podStartSLOduration=3.647536277 podStartE2EDuration="36.186753949s" podCreationTimestamp="2026-02-18 00:53:30 +0000 UTC" firstStartedPulling="2026-02-18 00:53:33.130293154 +0000 UTC m=+1154.698306324" lastFinishedPulling="2026-02-18 00:54:05.669510826 +0000 UTC m=+1187.237523996" observedRunningTime="2026-02-18 00:54:06.17489025 +0000 UTC m=+1187.742903430" watchObservedRunningTime="2026-02-18 00:54:06.186753949 +0000 UTC m=+1187.754767119" Feb 18 00:54:07 crc kubenswrapper[4791]: I0218 00:54:07.132599 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-b4d948c87-jqbhr" event={"ID":"4e11abdf-c683-4f45-a448-dcdfadbd9731","Type":"ContainerStarted","Data":"a31b8755256bc917f8b248372a5b1d5e8981983f6658e89a9ec250a0debbc91f"} Feb 18 00:54:07 crc kubenswrapper[4791]: I0218 00:54:07.133144 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-b4d948c87-jqbhr" Feb 18 00:54:07 crc kubenswrapper[4791]: I0218 00:54:07.134237 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-68f46476f-5tvb7" event={"ID":"a3e4d5ea-ce2c-4d6b-93bd-f67a88f996a6","Type":"ContainerStarted","Data":"407965d9a74a0c980119804ee23ef0861d710050e5aaac2d79f8630c7179fe63"} Feb 18 00:54:07 crc kubenswrapper[4791]: I0218 00:54:07.134486 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-68f46476f-5tvb7" Feb 18 00:54:07 crc kubenswrapper[4791]: I0218 00:54:07.135840 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-6994f66f48-9sxdc" 
event={"ID":"af444709-16b7-4e86-bf27-b0f4bcbd07d6","Type":"ContainerStarted","Data":"5d1a992abb81e913904bab86a98e883ed1cb74772cbb05496012570aff2f4d49"} Feb 18 00:54:07 crc kubenswrapper[4791]: I0218 00:54:07.136090 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-6994f66f48-9sxdc" Feb 18 00:54:07 crc kubenswrapper[4791]: I0218 00:54:07.137381 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-54f6768c69-mw585" event={"ID":"7ebed3cd-6f5f-4d8b-9b8c-d857c9ca0177","Type":"ContainerStarted","Data":"fea0bd6ab2d7218f3eb52338faf974594e8479a87401b6608aeaeb48f8b5513f"} Feb 18 00:54:07 crc kubenswrapper[4791]: I0218 00:54:07.137670 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-54f6768c69-mw585" Feb 18 00:54:07 crc kubenswrapper[4791]: I0218 00:54:07.155050 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-b4d948c87-jqbhr" podStartSLOduration=3.9581551040000003 podStartE2EDuration="38.155035726s" podCreationTimestamp="2026-02-18 00:53:29 +0000 UTC" firstStartedPulling="2026-02-18 00:53:32.323527661 +0000 UTC m=+1153.891540831" lastFinishedPulling="2026-02-18 00:54:06.520408283 +0000 UTC m=+1188.088421453" observedRunningTime="2026-02-18 00:54:07.153395385 +0000 UTC m=+1188.721408545" watchObservedRunningTime="2026-02-18 00:54:07.155035726 +0000 UTC m=+1188.723048896" Feb 18 00:54:07 crc kubenswrapper[4791]: I0218 00:54:07.170502 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-54f6768c69-mw585" podStartSLOduration=3.62672115 podStartE2EDuration="37.170484877s" podCreationTimestamp="2026-02-18 00:53:30 +0000 UTC" firstStartedPulling="2026-02-18 00:53:33.047937913 +0000 UTC m=+1154.615951083" lastFinishedPulling="2026-02-18 00:54:06.59170163 +0000 UTC m=+1188.159714810" observedRunningTime="2026-02-18 00:54:07.166084809 +0000 UTC m=+1188.734097979" watchObservedRunningTime="2026-02-18 00:54:07.170484877 +0000 UTC m=+1188.738498037" Feb 18 00:54:07 crc kubenswrapper[4791]: I0218 00:54:07.186014 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-6994f66f48-9sxdc" podStartSLOduration=3.789362399 podStartE2EDuration="37.185987698s" podCreationTimestamp="2026-02-18 00:53:30 +0000 UTC" firstStartedPulling="2026-02-18 00:53:33.121648788 +0000 UTC m=+1154.689661958" lastFinishedPulling="2026-02-18 00:54:06.518274087 +0000 UTC m=+1188.086287257" observedRunningTime="2026-02-18 00:54:07.181603553 +0000 UTC m=+1188.749616743" watchObservedRunningTime="2026-02-18 00:54:07.185987698 +0000 UTC m=+1188.754000888" Feb 18 00:54:07 crc kubenswrapper[4791]: I0218 00:54:07.203642 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-68f46476f-5tvb7" podStartSLOduration=3.7214918409999997 podStartE2EDuration="37.203625107s" podCreationTimestamp="2026-02-18 00:53:30 +0000 UTC" firstStartedPulling="2026-02-18 00:53:33.036711848 +0000 UTC m=+1154.604725018" lastFinishedPulling="2026-02-18 00:54:06.518845114 +0000 UTC m=+1188.086858284" observedRunningTime="2026-02-18 00:54:07.197853008 +0000 UTC m=+1188.765866178" watchObservedRunningTime="2026-02-18 00:54:07.203625107 +0000 UTC 
m=+1188.771638277" Feb 18 00:54:07 crc kubenswrapper[4791]: I0218 00:54:07.622088 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-669759659c-xt6gk" Feb 18 00:54:10 crc kubenswrapper[4791]: I0218 00:54:10.165396 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-6d8bf5c495-nzmq6" event={"ID":"e8318b6f-bf0c-447f-a43c-0ac54f9c60a4","Type":"ContainerStarted","Data":"e8b7fff76cbed7bad9193ab00d7c95523757184cd705f1648a8643880759be14"} Feb 18 00:54:10 crc kubenswrapper[4791]: I0218 00:54:10.166321 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-6d8bf5c495-nzmq6" Feb 18 00:54:10 crc kubenswrapper[4791]: I0218 00:54:10.168043 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-c6f9cb8b-mnkq9" event={"ID":"ea2ae9de-8373-4a24-bbe8-2308ecc8dad2","Type":"ContainerStarted","Data":"af21a73356062daabbc7a60aff8d26ae61046a63d590d912d7fb275451d476b1"} Feb 18 00:54:10 crc kubenswrapper[4791]: I0218 00:54:10.168285 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-c6f9cb8b-mnkq9" Feb 18 00:54:10 crc kubenswrapper[4791]: I0218 00:54:10.188296 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-868647ff47-kcd8v" Feb 18 00:54:10 crc kubenswrapper[4791]: I0218 00:54:10.189370 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-6d8bf5c495-nzmq6" podStartSLOduration=3.656981638 podStartE2EDuration="41.189349943s" podCreationTimestamp="2026-02-18 00:53:29 +0000 UTC" firstStartedPulling="2026-02-18 00:53:31.907608428 +0000 UTC m=+1153.475621598" lastFinishedPulling="2026-02-18 00:54:09.439976733 +0000 UTC m=+1191.007989903" observedRunningTime="2026-02-18 00:54:10.184272825 +0000 UTC m=+1191.752286005" watchObservedRunningTime="2026-02-18 00:54:10.189349943 +0000 UTC m=+1191.757363113" Feb 18 00:54:10 crc kubenswrapper[4791]: I0218 00:54:10.202030 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-5d946d989d-pbsxj" Feb 18 00:54:10 crc kubenswrapper[4791]: I0218 00:54:10.213671 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-c6f9cb8b-mnkq9" podStartSLOduration=4.151404921 podStartE2EDuration="40.213644299s" podCreationTimestamp="2026-02-18 00:53:30 +0000 UTC" firstStartedPulling="2026-02-18 00:53:33.057026322 +0000 UTC m=+1154.625039492" lastFinishedPulling="2026-02-18 00:54:09.11926571 +0000 UTC m=+1190.687278870" observedRunningTime="2026-02-18 00:54:10.202744549 +0000 UTC m=+1191.770757749" watchObservedRunningTime="2026-02-18 00:54:10.213644299 +0000 UTC m=+1191.781657479" Feb 18 00:54:10 crc kubenswrapper[4791]: I0218 00:54:10.280174 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-77987464f4-k2x8h" Feb 18 00:54:10 crc kubenswrapper[4791]: I0218 00:54:10.384817 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-69f49c598c-dhrvm" Feb 18 
00:54:10 crc kubenswrapper[4791]: I0218 00:54:10.604855 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-554564d7fc-265gr" Feb 18 00:54:10 crc kubenswrapper[4791]: I0218 00:54:10.605307 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-5b9b8895d5-nq4q5" Feb 18 00:54:11 crc kubenswrapper[4791]: I0218 00:54:11.401750 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-64ddbf8bb-9xxkb" Feb 18 00:54:11 crc kubenswrapper[4791]: I0218 00:54:11.402990 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-b4d948c87-jqbhr" Feb 18 00:54:11 crc kubenswrapper[4791]: I0218 00:54:11.403733 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-54f6768c69-mw585" Feb 18 00:54:11 crc kubenswrapper[4791]: I0218 00:54:11.469337 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-d44cf6b75-97ckn" Feb 18 00:54:11 crc kubenswrapper[4791]: I0218 00:54:11.553741 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-6994f66f48-9sxdc" Feb 18 00:54:11 crc kubenswrapper[4791]: I0218 00:54:11.724376 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-69f8888797-gz2td" Feb 18 00:54:11 crc kubenswrapper[4791]: I0218 00:54:11.792111 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-8497b45c89-47vp7" Feb 18 00:54:11 crc kubenswrapper[4791]: I0218 00:54:11.852982 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-68f46476f-5tvb7" Feb 18 00:54:11 crc kubenswrapper[4791]: I0218 00:54:11.950226 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-7866795846-bx4hv" Feb 18 00:54:11 crc kubenswrapper[4791]: I0218 00:54:11.958363 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-5db88f68c-sjggr" Feb 18 00:54:13 crc kubenswrapper[4791]: I0218 00:54:13.199044 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-567668f5cf-2klv9" event={"ID":"f1db1f83-f5b1-4c8d-af64-92816b1aa96d","Type":"ContainerStarted","Data":"c7549c651819a7860db85296b1aaeefcb67b30d6e8153a4ade7df078ed24dfea"} Feb 18 00:54:13 crc kubenswrapper[4791]: I0218 00:54:13.199627 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-567668f5cf-2klv9" Feb 18 00:54:13 crc kubenswrapper[4791]: I0218 00:54:13.222977 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-567668f5cf-2klv9" podStartSLOduration=3.808296434 podStartE2EDuration="43.222957488s" podCreationTimestamp="2026-02-18 00:53:30 +0000 UTC" firstStartedPulling="2026-02-18 00:53:33.046766266 +0000 UTC m=+1154.614779426" 
lastFinishedPulling="2026-02-18 00:54:12.46142731 +0000 UTC m=+1194.029440480" observedRunningTime="2026-02-18 00:54:13.215797695 +0000 UTC m=+1194.783810875" watchObservedRunningTime="2026-02-18 00:54:13.222957488 +0000 UTC m=+1194.790970658" Feb 18 00:54:16 crc kubenswrapper[4791]: I0218 00:54:16.764033 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-79d975b745-4xrps" Feb 18 00:54:17 crc kubenswrapper[4791]: I0218 00:54:17.692458 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp" Feb 18 00:54:20 crc kubenswrapper[4791]: I0218 00:54:20.219096 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-6d8bf5c495-nzmq6" Feb 18 00:54:21 crc kubenswrapper[4791]: I0218 00:54:21.562784 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-567668f5cf-2klv9" Feb 18 00:54:21 crc kubenswrapper[4791]: I0218 00:54:21.892109 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-c6f9cb8b-mnkq9" Feb 18 00:54:26 crc kubenswrapper[4791]: I0218 00:54:26.800274 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 00:54:26 crc kubenswrapper[4791]: I0218 00:54:26.800850 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 00:54:40 crc kubenswrapper[4791]: I0218 00:54:40.435287 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-p9kdj"] Feb 18 00:54:40 crc kubenswrapper[4791]: I0218 00:54:40.437290 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-p9kdj" Feb 18 00:54:40 crc kubenswrapper[4791]: I0218 00:54:40.448632 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Feb 18 00:54:40 crc kubenswrapper[4791]: I0218 00:54:40.449222 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Feb 18 00:54:40 crc kubenswrapper[4791]: I0218 00:54:40.449343 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-2zkqz" Feb 18 00:54:40 crc kubenswrapper[4791]: I0218 00:54:40.449445 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Feb 18 00:54:40 crc kubenswrapper[4791]: I0218 00:54:40.467890 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-p9kdj"] Feb 18 00:54:40 crc kubenswrapper[4791]: I0218 00:54:40.533524 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-v46bk"] Feb 18 00:54:40 crc kubenswrapper[4791]: I0218 00:54:40.535345 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-v46bk" Feb 18 00:54:40 crc kubenswrapper[4791]: I0218 00:54:40.553431 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Feb 18 00:54:40 crc kubenswrapper[4791]: I0218 00:54:40.589242 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-v46bk"] Feb 18 00:54:40 crc kubenswrapper[4791]: I0218 00:54:40.622992 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d48bffb-aea0-4fb7-85ee-239425c1627a-config\") pod \"dnsmasq-dns-675f4bcbfc-p9kdj\" (UID: \"4d48bffb-aea0-4fb7-85ee-239425c1627a\") " pod="openstack/dnsmasq-dns-675f4bcbfc-p9kdj" Feb 18 00:54:40 crc kubenswrapper[4791]: I0218 00:54:40.623132 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rcgrq\" (UniqueName: \"kubernetes.io/projected/4d48bffb-aea0-4fb7-85ee-239425c1627a-kube-api-access-rcgrq\") pod \"dnsmasq-dns-675f4bcbfc-p9kdj\" (UID: \"4d48bffb-aea0-4fb7-85ee-239425c1627a\") " pod="openstack/dnsmasq-dns-675f4bcbfc-p9kdj" Feb 18 00:54:40 crc kubenswrapper[4791]: I0218 00:54:40.724667 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rcgrq\" (UniqueName: \"kubernetes.io/projected/4d48bffb-aea0-4fb7-85ee-239425c1627a-kube-api-access-rcgrq\") pod \"dnsmasq-dns-675f4bcbfc-p9kdj\" (UID: \"4d48bffb-aea0-4fb7-85ee-239425c1627a\") " pod="openstack/dnsmasq-dns-675f4bcbfc-p9kdj" Feb 18 00:54:40 crc kubenswrapper[4791]: I0218 00:54:40.725036 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c07c6f72-51a8-45c7-a103-d24460d4f15a-config\") pod \"dnsmasq-dns-78dd6ddcc-v46bk\" (UID: \"c07c6f72-51a8-45c7-a103-d24460d4f15a\") " pod="openstack/dnsmasq-dns-78dd6ddcc-v46bk" Feb 18 00:54:40 crc kubenswrapper[4791]: I0218 00:54:40.725085 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-drp5n\" (UniqueName: \"kubernetes.io/projected/c07c6f72-51a8-45c7-a103-d24460d4f15a-kube-api-access-drp5n\") pod \"dnsmasq-dns-78dd6ddcc-v46bk\" (UID: \"c07c6f72-51a8-45c7-a103-d24460d4f15a\") " pod="openstack/dnsmasq-dns-78dd6ddcc-v46bk" Feb 18 00:54:40 crc kubenswrapper[4791]: I0218 00:54:40.725288 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d48bffb-aea0-4fb7-85ee-239425c1627a-config\") pod \"dnsmasq-dns-675f4bcbfc-p9kdj\" (UID: \"4d48bffb-aea0-4fb7-85ee-239425c1627a\") " pod="openstack/dnsmasq-dns-675f4bcbfc-p9kdj" Feb 18 00:54:40 crc kubenswrapper[4791]: I0218 00:54:40.725353 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c07c6f72-51a8-45c7-a103-d24460d4f15a-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-v46bk\" (UID: \"c07c6f72-51a8-45c7-a103-d24460d4f15a\") " pod="openstack/dnsmasq-dns-78dd6ddcc-v46bk" Feb 18 00:54:40 crc kubenswrapper[4791]: I0218 00:54:40.726327 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d48bffb-aea0-4fb7-85ee-239425c1627a-config\") pod \"dnsmasq-dns-675f4bcbfc-p9kdj\" (UID: \"4d48bffb-aea0-4fb7-85ee-239425c1627a\") " pod="openstack/dnsmasq-dns-675f4bcbfc-p9kdj" Feb 18 
00:54:40 crc kubenswrapper[4791]: I0218 00:54:40.745526 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rcgrq\" (UniqueName: \"kubernetes.io/projected/4d48bffb-aea0-4fb7-85ee-239425c1627a-kube-api-access-rcgrq\") pod \"dnsmasq-dns-675f4bcbfc-p9kdj\" (UID: \"4d48bffb-aea0-4fb7-85ee-239425c1627a\") " pod="openstack/dnsmasq-dns-675f4bcbfc-p9kdj" Feb 18 00:54:40 crc kubenswrapper[4791]: I0218 00:54:40.825607 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-p9kdj" Feb 18 00:54:40 crc kubenswrapper[4791]: I0218 00:54:40.828360 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c07c6f72-51a8-45c7-a103-d24460d4f15a-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-v46bk\" (UID: \"c07c6f72-51a8-45c7-a103-d24460d4f15a\") " pod="openstack/dnsmasq-dns-78dd6ddcc-v46bk" Feb 18 00:54:40 crc kubenswrapper[4791]: I0218 00:54:40.828443 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c07c6f72-51a8-45c7-a103-d24460d4f15a-config\") pod \"dnsmasq-dns-78dd6ddcc-v46bk\" (UID: \"c07c6f72-51a8-45c7-a103-d24460d4f15a\") " pod="openstack/dnsmasq-dns-78dd6ddcc-v46bk" Feb 18 00:54:40 crc kubenswrapper[4791]: I0218 00:54:40.828486 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-drp5n\" (UniqueName: \"kubernetes.io/projected/c07c6f72-51a8-45c7-a103-d24460d4f15a-kube-api-access-drp5n\") pod \"dnsmasq-dns-78dd6ddcc-v46bk\" (UID: \"c07c6f72-51a8-45c7-a103-d24460d4f15a\") " pod="openstack/dnsmasq-dns-78dd6ddcc-v46bk" Feb 18 00:54:40 crc kubenswrapper[4791]: I0218 00:54:40.829215 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c07c6f72-51a8-45c7-a103-d24460d4f15a-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-v46bk\" (UID: \"c07c6f72-51a8-45c7-a103-d24460d4f15a\") " pod="openstack/dnsmasq-dns-78dd6ddcc-v46bk" Feb 18 00:54:40 crc kubenswrapper[4791]: I0218 00:54:40.829327 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c07c6f72-51a8-45c7-a103-d24460d4f15a-config\") pod \"dnsmasq-dns-78dd6ddcc-v46bk\" (UID: \"c07c6f72-51a8-45c7-a103-d24460d4f15a\") " pod="openstack/dnsmasq-dns-78dd6ddcc-v46bk" Feb 18 00:54:40 crc kubenswrapper[4791]: I0218 00:54:40.860526 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-drp5n\" (UniqueName: \"kubernetes.io/projected/c07c6f72-51a8-45c7-a103-d24460d4f15a-kube-api-access-drp5n\") pod \"dnsmasq-dns-78dd6ddcc-v46bk\" (UID: \"c07c6f72-51a8-45c7-a103-d24460d4f15a\") " pod="openstack/dnsmasq-dns-78dd6ddcc-v46bk" Feb 18 00:54:40 crc kubenswrapper[4791]: I0218 00:54:40.884298 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-v46bk" Feb 18 00:54:41 crc kubenswrapper[4791]: I0218 00:54:41.318090 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-p9kdj"] Feb 18 00:54:41 crc kubenswrapper[4791]: I0218 00:54:41.402901 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-v46bk"] Feb 18 00:54:41 crc kubenswrapper[4791]: I0218 00:54:41.495912 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-v46bk" event={"ID":"c07c6f72-51a8-45c7-a103-d24460d4f15a","Type":"ContainerStarted","Data":"acc396869454858aab9fe727c034f3ca1ffb8f9efa7659f9cbe240d923f97166"} Feb 18 00:54:41 crc kubenswrapper[4791]: I0218 00:54:41.497108 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-p9kdj" event={"ID":"4d48bffb-aea0-4fb7-85ee-239425c1627a","Type":"ContainerStarted","Data":"44d842233afd1bdb4ff83fa5f8ad0996c5389b5bda38391778339756f7cfd040"} Feb 18 00:54:43 crc kubenswrapper[4791]: I0218 00:54:43.209023 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-p9kdj"] Feb 18 00:54:43 crc kubenswrapper[4791]: I0218 00:54:43.242784 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-grhf9"] Feb 18 00:54:43 crc kubenswrapper[4791]: I0218 00:54:43.244277 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-grhf9" Feb 18 00:54:43 crc kubenswrapper[4791]: I0218 00:54:43.294875 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/71baa5bb-d1af-486b-a025-ca9e786d2382-dns-svc\") pod \"dnsmasq-dns-666b6646f7-grhf9\" (UID: \"71baa5bb-d1af-486b-a025-ca9e786d2382\") " pod="openstack/dnsmasq-dns-666b6646f7-grhf9" Feb 18 00:54:43 crc kubenswrapper[4791]: I0218 00:54:43.297788 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-grhf9"] Feb 18 00:54:43 crc kubenswrapper[4791]: I0218 00:54:43.297819 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/71baa5bb-d1af-486b-a025-ca9e786d2382-config\") pod \"dnsmasq-dns-666b6646f7-grhf9\" (UID: \"71baa5bb-d1af-486b-a025-ca9e786d2382\") " pod="openstack/dnsmasq-dns-666b6646f7-grhf9" Feb 18 00:54:43 crc kubenswrapper[4791]: I0218 00:54:43.298150 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-clsqr\" (UniqueName: \"kubernetes.io/projected/71baa5bb-d1af-486b-a025-ca9e786d2382-kube-api-access-clsqr\") pod \"dnsmasq-dns-666b6646f7-grhf9\" (UID: \"71baa5bb-d1af-486b-a025-ca9e786d2382\") " pod="openstack/dnsmasq-dns-666b6646f7-grhf9" Feb 18 00:54:43 crc kubenswrapper[4791]: I0218 00:54:43.401289 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-clsqr\" (UniqueName: \"kubernetes.io/projected/71baa5bb-d1af-486b-a025-ca9e786d2382-kube-api-access-clsqr\") pod \"dnsmasq-dns-666b6646f7-grhf9\" (UID: \"71baa5bb-d1af-486b-a025-ca9e786d2382\") " pod="openstack/dnsmasq-dns-666b6646f7-grhf9" Feb 18 00:54:43 crc kubenswrapper[4791]: I0218 00:54:43.401372 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/71baa5bb-d1af-486b-a025-ca9e786d2382-dns-svc\") 
pod \"dnsmasq-dns-666b6646f7-grhf9\" (UID: \"71baa5bb-d1af-486b-a025-ca9e786d2382\") " pod="openstack/dnsmasq-dns-666b6646f7-grhf9" Feb 18 00:54:43 crc kubenswrapper[4791]: I0218 00:54:43.401418 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/71baa5bb-d1af-486b-a025-ca9e786d2382-config\") pod \"dnsmasq-dns-666b6646f7-grhf9\" (UID: \"71baa5bb-d1af-486b-a025-ca9e786d2382\") " pod="openstack/dnsmasq-dns-666b6646f7-grhf9" Feb 18 00:54:43 crc kubenswrapper[4791]: I0218 00:54:43.402275 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/71baa5bb-d1af-486b-a025-ca9e786d2382-config\") pod \"dnsmasq-dns-666b6646f7-grhf9\" (UID: \"71baa5bb-d1af-486b-a025-ca9e786d2382\") " pod="openstack/dnsmasq-dns-666b6646f7-grhf9" Feb 18 00:54:43 crc kubenswrapper[4791]: I0218 00:54:43.403259 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/71baa5bb-d1af-486b-a025-ca9e786d2382-dns-svc\") pod \"dnsmasq-dns-666b6646f7-grhf9\" (UID: \"71baa5bb-d1af-486b-a025-ca9e786d2382\") " pod="openstack/dnsmasq-dns-666b6646f7-grhf9" Feb 18 00:54:43 crc kubenswrapper[4791]: I0218 00:54:43.439023 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-clsqr\" (UniqueName: \"kubernetes.io/projected/71baa5bb-d1af-486b-a025-ca9e786d2382-kube-api-access-clsqr\") pod \"dnsmasq-dns-666b6646f7-grhf9\" (UID: \"71baa5bb-d1af-486b-a025-ca9e786d2382\") " pod="openstack/dnsmasq-dns-666b6646f7-grhf9" Feb 18 00:54:43 crc kubenswrapper[4791]: I0218 00:54:43.616009 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-grhf9" Feb 18 00:54:43 crc kubenswrapper[4791]: I0218 00:54:43.631002 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-v46bk"] Feb 18 00:54:43 crc kubenswrapper[4791]: I0218 00:54:43.690708 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-2nbhb"] Feb 18 00:54:43 crc kubenswrapper[4791]: I0218 00:54:43.693062 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-2nbhb" Feb 18 00:54:43 crc kubenswrapper[4791]: I0218 00:54:43.711851 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/864417f3-df49-4f21-b1a7-8895951b7914-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-2nbhb\" (UID: \"864417f3-df49-4f21-b1a7-8895951b7914\") " pod="openstack/dnsmasq-dns-57d769cc4f-2nbhb" Feb 18 00:54:43 crc kubenswrapper[4791]: I0218 00:54:43.711908 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vmvcl\" (UniqueName: \"kubernetes.io/projected/864417f3-df49-4f21-b1a7-8895951b7914-kube-api-access-vmvcl\") pod \"dnsmasq-dns-57d769cc4f-2nbhb\" (UID: \"864417f3-df49-4f21-b1a7-8895951b7914\") " pod="openstack/dnsmasq-dns-57d769cc4f-2nbhb" Feb 18 00:54:43 crc kubenswrapper[4791]: I0218 00:54:43.711982 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/864417f3-df49-4f21-b1a7-8895951b7914-config\") pod \"dnsmasq-dns-57d769cc4f-2nbhb\" (UID: \"864417f3-df49-4f21-b1a7-8895951b7914\") " pod="openstack/dnsmasq-dns-57d769cc4f-2nbhb" Feb 18 00:54:43 crc kubenswrapper[4791]: I0218 00:54:43.723482 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-2nbhb"] Feb 18 00:54:43 crc kubenswrapper[4791]: I0218 00:54:43.813280 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/864417f3-df49-4f21-b1a7-8895951b7914-config\") pod \"dnsmasq-dns-57d769cc4f-2nbhb\" (UID: \"864417f3-df49-4f21-b1a7-8895951b7914\") " pod="openstack/dnsmasq-dns-57d769cc4f-2nbhb" Feb 18 00:54:43 crc kubenswrapper[4791]: I0218 00:54:43.813378 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/864417f3-df49-4f21-b1a7-8895951b7914-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-2nbhb\" (UID: \"864417f3-df49-4f21-b1a7-8895951b7914\") " pod="openstack/dnsmasq-dns-57d769cc4f-2nbhb" Feb 18 00:54:43 crc kubenswrapper[4791]: I0218 00:54:43.813416 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vmvcl\" (UniqueName: \"kubernetes.io/projected/864417f3-df49-4f21-b1a7-8895951b7914-kube-api-access-vmvcl\") pod \"dnsmasq-dns-57d769cc4f-2nbhb\" (UID: \"864417f3-df49-4f21-b1a7-8895951b7914\") " pod="openstack/dnsmasq-dns-57d769cc4f-2nbhb" Feb 18 00:54:43 crc kubenswrapper[4791]: I0218 00:54:43.814321 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/864417f3-df49-4f21-b1a7-8895951b7914-config\") pod \"dnsmasq-dns-57d769cc4f-2nbhb\" (UID: \"864417f3-df49-4f21-b1a7-8895951b7914\") " pod="openstack/dnsmasq-dns-57d769cc4f-2nbhb" Feb 18 00:54:43 crc kubenswrapper[4791]: I0218 00:54:43.814493 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/864417f3-df49-4f21-b1a7-8895951b7914-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-2nbhb\" (UID: \"864417f3-df49-4f21-b1a7-8895951b7914\") " pod="openstack/dnsmasq-dns-57d769cc4f-2nbhb" Feb 18 00:54:43 crc kubenswrapper[4791]: I0218 00:54:43.842128 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vmvcl\" (UniqueName: 
\"kubernetes.io/projected/864417f3-df49-4f21-b1a7-8895951b7914-kube-api-access-vmvcl\") pod \"dnsmasq-dns-57d769cc4f-2nbhb\" (UID: \"864417f3-df49-4f21-b1a7-8895951b7914\") " pod="openstack/dnsmasq-dns-57d769cc4f-2nbhb" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.092920 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-2nbhb" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.308005 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-grhf9"] Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.406744 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.408506 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.415071 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.415388 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.415531 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.415776 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.416055 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.416517 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-httct" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.416963 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.420062 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.429050 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-2"] Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.430686 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-2" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.436832 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-2"] Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.450050 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-1"] Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.451908 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-1" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.481848 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-1"] Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.530791 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/10fd526e-f41c-4c4a-8e15-239cd3ac37da-server-conf\") pod \"rabbitmq-server-0\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " pod="openstack/rabbitmq-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.530846 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/10fd526e-f41c-4c4a-8e15-239cd3ac37da-config-data\") pod \"rabbitmq-server-0\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " pod="openstack/rabbitmq-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.530875 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/10fd526e-f41c-4c4a-8e15-239cd3ac37da-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " pod="openstack/rabbitmq-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.530894 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/10fd526e-f41c-4c4a-8e15-239cd3ac37da-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " pod="openstack/rabbitmq-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.530985 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mkc6z\" (UniqueName: \"kubernetes.io/projected/10fd526e-f41c-4c4a-8e15-239cd3ac37da-kube-api-access-mkc6z\") pod \"rabbitmq-server-0\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " pod="openstack/rabbitmq-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.532173 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/10fd526e-f41c-4c4a-8e15-239cd3ac37da-pod-info\") pod \"rabbitmq-server-0\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " pod="openstack/rabbitmq-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.532246 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-fa7fafa5-cc4e-44a6-ba81-4e2329b24161\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-fa7fafa5-cc4e-44a6-ba81-4e2329b24161\") pod \"rabbitmq-server-0\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " pod="openstack/rabbitmq-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.532493 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/10fd526e-f41c-4c4a-8e15-239cd3ac37da-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " pod="openstack/rabbitmq-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.532529 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: 
\"kubernetes.io/secret/10fd526e-f41c-4c4a-8e15-239cd3ac37da-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " pod="openstack/rabbitmq-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.532581 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/10fd526e-f41c-4c4a-8e15-239cd3ac37da-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " pod="openstack/rabbitmq-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.532600 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/10fd526e-f41c-4c4a-8e15-239cd3ac37da-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " pod="openstack/rabbitmq-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.610924 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-grhf9" event={"ID":"71baa5bb-d1af-486b-a025-ca9e786d2382","Type":"ContainerStarted","Data":"8453721944eff427de9f240b813e8570c57c7280c83fce04b6c3bc2a1be2ac32"} Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.635030 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5b450ecd-05e1-453a-a4d5-953802f0a1cf-server-conf\") pod \"rabbitmq-server-2\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " pod="openstack/rabbitmq-server-2" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.635080 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/10fd526e-f41c-4c4a-8e15-239cd3ac37da-config-data\") pod \"rabbitmq-server-0\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " pod="openstack/rabbitmq-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.635104 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/519e8bd0-f30e-4ff2-be43-b33764a95351-server-conf\") pod \"rabbitmq-server-1\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " pod="openstack/rabbitmq-server-1" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.635130 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/10fd526e-f41c-4c4a-8e15-239cd3ac37da-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " pod="openstack/rabbitmq-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.635149 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5b450ecd-05e1-453a-a4d5-953802f0a1cf-config-data\") pod \"rabbitmq-server-2\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " pod="openstack/rabbitmq-server-2" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.635182 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/10fd526e-f41c-4c4a-8e15-239cd3ac37da-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " pod="openstack/rabbitmq-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: 
I0218 00:54:44.635203 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mkc6z\" (UniqueName: \"kubernetes.io/projected/10fd526e-f41c-4c4a-8e15-239cd3ac37da-kube-api-access-mkc6z\") pod \"rabbitmq-server-0\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " pod="openstack/rabbitmq-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.635225 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/519e8bd0-f30e-4ff2-be43-b33764a95351-rabbitmq-plugins\") pod \"rabbitmq-server-1\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " pod="openstack/rabbitmq-server-1" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.635244 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/10fd526e-f41c-4c4a-8e15-239cd3ac37da-pod-info\") pod \"rabbitmq-server-0\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " pod="openstack/rabbitmq-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.635261 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5b450ecd-05e1-453a-a4d5-953802f0a1cf-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-2\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " pod="openstack/rabbitmq-server-2" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.635291 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-250fdf75-dad4-4c83-ac41-ea858e526e7c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-250fdf75-dad4-4c83-ac41-ea858e526e7c\") pod \"rabbitmq-server-1\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " pod="openstack/rabbitmq-server-1" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.635325 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jp56k\" (UniqueName: \"kubernetes.io/projected/5b450ecd-05e1-453a-a4d5-953802f0a1cf-kube-api-access-jp56k\") pod \"rabbitmq-server-2\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " pod="openstack/rabbitmq-server-2" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.635352 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-fa7fafa5-cc4e-44a6-ba81-4e2329b24161\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-fa7fafa5-cc4e-44a6-ba81-4e2329b24161\") pod \"rabbitmq-server-0\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " pod="openstack/rabbitmq-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.635379 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5b450ecd-05e1-453a-a4d5-953802f0a1cf-rabbitmq-plugins\") pod \"rabbitmq-server-2\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " pod="openstack/rabbitmq-server-2" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.635400 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5b450ecd-05e1-453a-a4d5-953802f0a1cf-pod-info\") pod \"rabbitmq-server-2\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " pod="openstack/rabbitmq-server-2" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 
00:54:44.635421 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5b450ecd-05e1-453a-a4d5-953802f0a1cf-rabbitmq-confd\") pod \"rabbitmq-server-2\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " pod="openstack/rabbitmq-server-2" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.635438 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5b450ecd-05e1-453a-a4d5-953802f0a1cf-erlang-cookie-secret\") pod \"rabbitmq-server-2\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " pod="openstack/rabbitmq-server-2" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.635453 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5b450ecd-05e1-453a-a4d5-953802f0a1cf-rabbitmq-tls\") pod \"rabbitmq-server-2\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " pod="openstack/rabbitmq-server-2" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.635477 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/10fd526e-f41c-4c4a-8e15-239cd3ac37da-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " pod="openstack/rabbitmq-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.635503 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/10fd526e-f41c-4c4a-8e15-239cd3ac37da-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " pod="openstack/rabbitmq-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.635523 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/519e8bd0-f30e-4ff2-be43-b33764a95351-rabbitmq-tls\") pod \"rabbitmq-server-1\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " pod="openstack/rabbitmq-server-1" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.635555 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-db75c799-c8b4-454a-9a7a-9308243c50db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-db75c799-c8b4-454a-9a7a-9308243c50db\") pod \"rabbitmq-server-2\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " pod="openstack/rabbitmq-server-2" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.635584 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/10fd526e-f41c-4c4a-8e15-239cd3ac37da-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " pod="openstack/rabbitmq-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.635598 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5b450ecd-05e1-453a-a4d5-953802f0a1cf-plugins-conf\") pod \"rabbitmq-server-2\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " pod="openstack/rabbitmq-server-2" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.635616 4791 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/10fd526e-f41c-4c4a-8e15-239cd3ac37da-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " pod="openstack/rabbitmq-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.635634 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/519e8bd0-f30e-4ff2-be43-b33764a95351-rabbitmq-confd\") pod \"rabbitmq-server-1\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " pod="openstack/rabbitmq-server-1" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.635650 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/519e8bd0-f30e-4ff2-be43-b33764a95351-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-1\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " pod="openstack/rabbitmq-server-1" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.635668 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/519e8bd0-f30e-4ff2-be43-b33764a95351-pod-info\") pod \"rabbitmq-server-1\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " pod="openstack/rabbitmq-server-1" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.635693 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/519e8bd0-f30e-4ff2-be43-b33764a95351-plugins-conf\") pod \"rabbitmq-server-1\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " pod="openstack/rabbitmq-server-1" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.635718 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sd7qb\" (UniqueName: \"kubernetes.io/projected/519e8bd0-f30e-4ff2-be43-b33764a95351-kube-api-access-sd7qb\") pod \"rabbitmq-server-1\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " pod="openstack/rabbitmq-server-1" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.635733 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/519e8bd0-f30e-4ff2-be43-b33764a95351-erlang-cookie-secret\") pod \"rabbitmq-server-1\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " pod="openstack/rabbitmq-server-1" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.635751 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/10fd526e-f41c-4c4a-8e15-239cd3ac37da-server-conf\") pod \"rabbitmq-server-0\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " pod="openstack/rabbitmq-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.635766 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/519e8bd0-f30e-4ff2-be43-b33764a95351-config-data\") pod \"rabbitmq-server-1\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " pod="openstack/rabbitmq-server-1" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.637771 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: 
\"kubernetes.io/empty-dir/10fd526e-f41c-4c4a-8e15-239cd3ac37da-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " pod="openstack/rabbitmq-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.638311 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/10fd526e-f41c-4c4a-8e15-239cd3ac37da-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " pod="openstack/rabbitmq-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.639138 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/10fd526e-f41c-4c4a-8e15-239cd3ac37da-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " pod="openstack/rabbitmq-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.639466 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/10fd526e-f41c-4c4a-8e15-239cd3ac37da-config-data\") pod \"rabbitmq-server-0\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " pod="openstack/rabbitmq-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.642182 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/10fd526e-f41c-4c4a-8e15-239cd3ac37da-server-conf\") pod \"rabbitmq-server-0\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " pod="openstack/rabbitmq-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.642358 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-2nbhb"] Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.646455 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/10fd526e-f41c-4c4a-8e15-239cd3ac37da-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " pod="openstack/rabbitmq-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.646471 4791 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.646502 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/10fd526e-f41c-4c4a-8e15-239cd3ac37da-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " pod="openstack/rabbitmq-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.646530 4791 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-fa7fafa5-cc4e-44a6-ba81-4e2329b24161\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-fa7fafa5-cc4e-44a6-ba81-4e2329b24161\") pod \"rabbitmq-server-0\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/3f899ddc76ac764fa06ebb180a6e42e627d315f5b0d60d5f18cf3a3154ff692c/globalmount\"" pod="openstack/rabbitmq-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.659432 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/10fd526e-f41c-4c4a-8e15-239cd3ac37da-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " pod="openstack/rabbitmq-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.661886 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mkc6z\" (UniqueName: \"kubernetes.io/projected/10fd526e-f41c-4c4a-8e15-239cd3ac37da-kube-api-access-mkc6z\") pod \"rabbitmq-server-0\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " pod="openstack/rabbitmq-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.666476 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/10fd526e-f41c-4c4a-8e15-239cd3ac37da-pod-info\") pod \"rabbitmq-server-0\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " pod="openstack/rabbitmq-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: W0218 00:54:44.667126 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod864417f3_df49_4f21_b1a7_8895951b7914.slice/crio-1c27facc4838726eccc6855f42697262954be1544f233e87d857b6bfdcb8a5a1 WatchSource:0}: Error finding container 1c27facc4838726eccc6855f42697262954be1544f233e87d857b6bfdcb8a5a1: Status 404 returned error can't find the container with id 1c27facc4838726eccc6855f42697262954be1544f233e87d857b6bfdcb8a5a1 Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.707616 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-fa7fafa5-cc4e-44a6-ba81-4e2329b24161\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-fa7fafa5-cc4e-44a6-ba81-4e2329b24161\") pod \"rabbitmq-server-0\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " pod="openstack/rabbitmq-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.737694 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/519e8bd0-f30e-4ff2-be43-b33764a95351-rabbitmq-plugins\") pod \"rabbitmq-server-1\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " pod="openstack/rabbitmq-server-1" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.737761 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/5b450ecd-05e1-453a-a4d5-953802f0a1cf-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-2\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " pod="openstack/rabbitmq-server-2" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.737793 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-250fdf75-dad4-4c83-ac41-ea858e526e7c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-250fdf75-dad4-4c83-ac41-ea858e526e7c\") pod \"rabbitmq-server-1\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " pod="openstack/rabbitmq-server-1" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.737825 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jp56k\" (UniqueName: \"kubernetes.io/projected/5b450ecd-05e1-453a-a4d5-953802f0a1cf-kube-api-access-jp56k\") pod \"rabbitmq-server-2\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " pod="openstack/rabbitmq-server-2" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.737841 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5b450ecd-05e1-453a-a4d5-953802f0a1cf-rabbitmq-plugins\") pod \"rabbitmq-server-2\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " pod="openstack/rabbitmq-server-2" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.737856 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5b450ecd-05e1-453a-a4d5-953802f0a1cf-pod-info\") pod \"rabbitmq-server-2\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " pod="openstack/rabbitmq-server-2" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.737875 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5b450ecd-05e1-453a-a4d5-953802f0a1cf-rabbitmq-confd\") pod \"rabbitmq-server-2\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " pod="openstack/rabbitmq-server-2" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.737892 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5b450ecd-05e1-453a-a4d5-953802f0a1cf-erlang-cookie-secret\") pod \"rabbitmq-server-2\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " pod="openstack/rabbitmq-server-2" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.737908 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5b450ecd-05e1-453a-a4d5-953802f0a1cf-rabbitmq-tls\") pod \"rabbitmq-server-2\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " pod="openstack/rabbitmq-server-2" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.737942 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/519e8bd0-f30e-4ff2-be43-b33764a95351-rabbitmq-tls\") pod \"rabbitmq-server-1\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " pod="openstack/rabbitmq-server-1" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.737971 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-db75c799-c8b4-454a-9a7a-9308243c50db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-db75c799-c8b4-454a-9a7a-9308243c50db\") pod \"rabbitmq-server-2\" (UID: 
\"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " pod="openstack/rabbitmq-server-2" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.737996 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5b450ecd-05e1-453a-a4d5-953802f0a1cf-plugins-conf\") pod \"rabbitmq-server-2\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " pod="openstack/rabbitmq-server-2" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.738014 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/519e8bd0-f30e-4ff2-be43-b33764a95351-rabbitmq-confd\") pod \"rabbitmq-server-1\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " pod="openstack/rabbitmq-server-1" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.738028 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/519e8bd0-f30e-4ff2-be43-b33764a95351-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-1\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " pod="openstack/rabbitmq-server-1" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.738046 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/519e8bd0-f30e-4ff2-be43-b33764a95351-pod-info\") pod \"rabbitmq-server-1\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " pod="openstack/rabbitmq-server-1" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.738067 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/519e8bd0-f30e-4ff2-be43-b33764a95351-plugins-conf\") pod \"rabbitmq-server-1\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " pod="openstack/rabbitmq-server-1" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.738083 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sd7qb\" (UniqueName: \"kubernetes.io/projected/519e8bd0-f30e-4ff2-be43-b33764a95351-kube-api-access-sd7qb\") pod \"rabbitmq-server-1\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " pod="openstack/rabbitmq-server-1" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.738098 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/519e8bd0-f30e-4ff2-be43-b33764a95351-erlang-cookie-secret\") pod \"rabbitmq-server-1\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " pod="openstack/rabbitmq-server-1" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.738113 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/519e8bd0-f30e-4ff2-be43-b33764a95351-config-data\") pod \"rabbitmq-server-1\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " pod="openstack/rabbitmq-server-1" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.738132 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5b450ecd-05e1-453a-a4d5-953802f0a1cf-server-conf\") pod \"rabbitmq-server-2\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " pod="openstack/rabbitmq-server-2" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.738150 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" 
(UniqueName: \"kubernetes.io/configmap/519e8bd0-f30e-4ff2-be43-b33764a95351-server-conf\") pod \"rabbitmq-server-1\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " pod="openstack/rabbitmq-server-1" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.738184 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5b450ecd-05e1-453a-a4d5-953802f0a1cf-config-data\") pod \"rabbitmq-server-2\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " pod="openstack/rabbitmq-server-2" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.738879 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5b450ecd-05e1-453a-a4d5-953802f0a1cf-config-data\") pod \"rabbitmq-server-2\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " pod="openstack/rabbitmq-server-2" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.739200 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/519e8bd0-f30e-4ff2-be43-b33764a95351-rabbitmq-plugins\") pod \"rabbitmq-server-1\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " pod="openstack/rabbitmq-server-1" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.739428 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5b450ecd-05e1-453a-a4d5-953802f0a1cf-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-2\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " pod="openstack/rabbitmq-server-2" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.740070 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5b450ecd-05e1-453a-a4d5-953802f0a1cf-rabbitmq-plugins\") pod \"rabbitmq-server-2\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " pod="openstack/rabbitmq-server-2" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.742671 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/519e8bd0-f30e-4ff2-be43-b33764a95351-config-data\") pod \"rabbitmq-server-1\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " pod="openstack/rabbitmq-server-1" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.745778 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5b450ecd-05e1-453a-a4d5-953802f0a1cf-pod-info\") pod \"rabbitmq-server-2\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " pod="openstack/rabbitmq-server-2" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.746716 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5b450ecd-05e1-453a-a4d5-953802f0a1cf-server-conf\") pod \"rabbitmq-server-2\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " pod="openstack/rabbitmq-server-2" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.748661 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/519e8bd0-f30e-4ff2-be43-b33764a95351-plugins-conf\") pod \"rabbitmq-server-1\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " pod="openstack/rabbitmq-server-1" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.750150 4791 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/519e8bd0-f30e-4ff2-be43-b33764a95351-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-1\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " pod="openstack/rabbitmq-server-1" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.751506 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/519e8bd0-f30e-4ff2-be43-b33764a95351-server-conf\") pod \"rabbitmq-server-1\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " pod="openstack/rabbitmq-server-1" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.755483 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/519e8bd0-f30e-4ff2-be43-b33764a95351-pod-info\") pod \"rabbitmq-server-1\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " pod="openstack/rabbitmq-server-1" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.758383 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5b450ecd-05e1-453a-a4d5-953802f0a1cf-plugins-conf\") pod \"rabbitmq-server-2\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " pod="openstack/rabbitmq-server-2" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.760359 4791 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.760397 4791 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-db75c799-c8b4-454a-9a7a-9308243c50db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-db75c799-c8b4-454a-9a7a-9308243c50db\") pod \"rabbitmq-server-2\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/d80776f0b0e6a29b061cf3e7bee3ac6a238caf9eab0bcc21880227f40c07f67b/globalmount\"" pod="openstack/rabbitmq-server-2" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.760437 4791 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.760465 4791 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-250fdf75-dad4-4c83-ac41-ea858e526e7c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-250fdf75-dad4-4c83-ac41-ea858e526e7c\") pod \"rabbitmq-server-1\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/8fe3627c0588b651edad1eb27c527cf6c3fb938e5bed6793f718baee94b2ccc0/globalmount\"" pod="openstack/rabbitmq-server-1" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.765139 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5b450ecd-05e1-453a-a4d5-953802f0a1cf-erlang-cookie-secret\") pod \"rabbitmq-server-2\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " pod="openstack/rabbitmq-server-2" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.765659 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5b450ecd-05e1-453a-a4d5-953802f0a1cf-rabbitmq-confd\") pod \"rabbitmq-server-2\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " pod="openstack/rabbitmq-server-2" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.766905 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/519e8bd0-f30e-4ff2-be43-b33764a95351-rabbitmq-tls\") pod \"rabbitmq-server-1\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " pod="openstack/rabbitmq-server-1" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.767716 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5b450ecd-05e1-453a-a4d5-953802f0a1cf-rabbitmq-tls\") pod \"rabbitmq-server-2\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " pod="openstack/rabbitmq-server-2" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.772136 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/519e8bd0-f30e-4ff2-be43-b33764a95351-erlang-cookie-secret\") pod \"rabbitmq-server-1\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " pod="openstack/rabbitmq-server-1" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.772236 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/519e8bd0-f30e-4ff2-be43-b33764a95351-rabbitmq-confd\") pod \"rabbitmq-server-1\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " pod="openstack/rabbitmq-server-1" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.772589 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.777343 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sd7qb\" (UniqueName: \"kubernetes.io/projected/519e8bd0-f30e-4ff2-be43-b33764a95351-kube-api-access-sd7qb\") pod \"rabbitmq-server-1\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " pod="openstack/rabbitmq-server-1" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.779819 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jp56k\" (UniqueName: \"kubernetes.io/projected/5b450ecd-05e1-453a-a4d5-953802f0a1cf-kube-api-access-jp56k\") pod \"rabbitmq-server-2\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " pod="openstack/rabbitmq-server-2" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.810811 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-250fdf75-dad4-4c83-ac41-ea858e526e7c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-250fdf75-dad4-4c83-ac41-ea858e526e7c\") pod \"rabbitmq-server-1\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " pod="openstack/rabbitmq-server-1" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.824932 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-db75c799-c8b4-454a-9a7a-9308243c50db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-db75c799-c8b4-454a-9a7a-9308243c50db\") pod \"rabbitmq-server-2\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " pod="openstack/rabbitmq-server-2" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.836330 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.838067 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.841546 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-w7wn5" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.841706 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.841817 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.841927 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.841972 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.842065 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.842129 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.924032 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.958575 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4bb5fcf5-6cd5-4569-b788-5740edee3793-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.958754 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4bb5fcf5-6cd5-4569-b788-5740edee3793-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.958774 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4bb5fcf5-6cd5-4569-b788-5740edee3793-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.958881 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4bb5fcf5-6cd5-4569-b788-5740edee3793-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.958943 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4bb5fcf5-6cd5-4569-b788-5740edee3793-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.959122 4791 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4bb5fcf5-6cd5-4569-b788-5740edee3793-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.959218 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-a1afe60e-af4f-4b7f-b712-986fa012f8ac\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a1afe60e-af4f-4b7f-b712-986fa012f8ac\") pod \"rabbitmq-cell1-server-0\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.959262 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4bb5fcf5-6cd5-4569-b788-5740edee3793-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.959319 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4bb5fcf5-6cd5-4569-b788-5740edee3793-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.959381 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5tzjn\" (UniqueName: \"kubernetes.io/projected/4bb5fcf5-6cd5-4569-b788-5740edee3793-kube-api-access-5tzjn\") pod \"rabbitmq-cell1-server-0\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:54:44 crc kubenswrapper[4791]: I0218 00:54:44.959410 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4bb5fcf5-6cd5-4569-b788-5740edee3793-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:54:45 crc kubenswrapper[4791]: I0218 00:54:45.063439 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4bb5fcf5-6cd5-4569-b788-5740edee3793-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:54:45 crc kubenswrapper[4791]: I0218 00:54:45.063510 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5tzjn\" (UniqueName: \"kubernetes.io/projected/4bb5fcf5-6cd5-4569-b788-5740edee3793-kube-api-access-5tzjn\") pod \"rabbitmq-cell1-server-0\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:54:45 crc kubenswrapper[4791]: I0218 00:54:45.063540 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4bb5fcf5-6cd5-4569-b788-5740edee3793-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:54:45 crc kubenswrapper[4791]: 
I0218 00:54:45.064418 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4bb5fcf5-6cd5-4569-b788-5740edee3793-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:54:45 crc kubenswrapper[4791]: I0218 00:54:45.064442 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4bb5fcf5-6cd5-4569-b788-5740edee3793-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:54:45 crc kubenswrapper[4791]: I0218 00:54:45.064458 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4bb5fcf5-6cd5-4569-b788-5740edee3793-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:54:45 crc kubenswrapper[4791]: I0218 00:54:45.064493 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4bb5fcf5-6cd5-4569-b788-5740edee3793-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:54:45 crc kubenswrapper[4791]: I0218 00:54:45.064518 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4bb5fcf5-6cd5-4569-b788-5740edee3793-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:54:45 crc kubenswrapper[4791]: I0218 00:54:45.064567 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4bb5fcf5-6cd5-4569-b788-5740edee3793-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:54:45 crc kubenswrapper[4791]: I0218 00:54:45.064595 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-a1afe60e-af4f-4b7f-b712-986fa012f8ac\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a1afe60e-af4f-4b7f-b712-986fa012f8ac\") pod \"rabbitmq-cell1-server-0\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:54:45 crc kubenswrapper[4791]: I0218 00:54:45.064620 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4bb5fcf5-6cd5-4569-b788-5740edee3793-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:54:45 crc kubenswrapper[4791]: I0218 00:54:45.066382 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4bb5fcf5-6cd5-4569-b788-5740edee3793-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:54:45 crc kubenswrapper[4791]: I0218 00:54:45.068002 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" 
(UniqueName: \"kubernetes.io/configmap/4bb5fcf5-6cd5-4569-b788-5740edee3793-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:54:45 crc kubenswrapper[4791]: I0218 00:54:45.064187 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4bb5fcf5-6cd5-4569-b788-5740edee3793-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:54:45 crc kubenswrapper[4791]: I0218 00:54:45.069384 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4bb5fcf5-6cd5-4569-b788-5740edee3793-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:54:45 crc kubenswrapper[4791]: I0218 00:54:45.069857 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4bb5fcf5-6cd5-4569-b788-5740edee3793-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:54:45 crc kubenswrapper[4791]: I0218 00:54:45.071123 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4bb5fcf5-6cd5-4569-b788-5740edee3793-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:54:45 crc kubenswrapper[4791]: I0218 00:54:45.071520 4791 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Feb 18 00:54:45 crc kubenswrapper[4791]: I0218 00:54:45.071547 4791 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-a1afe60e-af4f-4b7f-b712-986fa012f8ac\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a1afe60e-af4f-4b7f-b712-986fa012f8ac\") pod \"rabbitmq-cell1-server-0\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/bda4c13c6af3a740a356a538e78a89662164c12658c511cf721ff5cb1d0d32f4/globalmount\"" pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:54:45 crc kubenswrapper[4791]: I0218 00:54:45.072340 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4bb5fcf5-6cd5-4569-b788-5740edee3793-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:54:45 crc kubenswrapper[4791]: I0218 00:54:45.073903 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4bb5fcf5-6cd5-4569-b788-5740edee3793-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:54:45 crc kubenswrapper[4791]: I0218 00:54:45.083651 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-2" Feb 18 00:54:45 crc kubenswrapper[4791]: I0218 00:54:45.084621 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5tzjn\" (UniqueName: \"kubernetes.io/projected/4bb5fcf5-6cd5-4569-b788-5740edee3793-kube-api-access-5tzjn\") pod \"rabbitmq-cell1-server-0\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:54:45 crc kubenswrapper[4791]: I0218 00:54:45.090574 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4bb5fcf5-6cd5-4569-b788-5740edee3793-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:54:45 crc kubenswrapper[4791]: I0218 00:54:45.103816 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-1" Feb 18 00:54:45 crc kubenswrapper[4791]: I0218 00:54:45.127281 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-a1afe60e-af4f-4b7f-b712-986fa012f8ac\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a1afe60e-af4f-4b7f-b712-986fa012f8ac\") pod \"rabbitmq-cell1-server-0\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:54:45 crc kubenswrapper[4791]: I0218 00:54:45.219939 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:54:45 crc kubenswrapper[4791]: I0218 00:54:45.445699 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Feb 18 00:54:45 crc kubenswrapper[4791]: I0218 00:54:45.649531 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-2nbhb" event={"ID":"864417f3-df49-4f21-b1a7-8895951b7914","Type":"ContainerStarted","Data":"1c27facc4838726eccc6855f42697262954be1544f233e87d857b6bfdcb8a5a1"} Feb 18 00:54:45 crc kubenswrapper[4791]: I0218 00:54:45.893541 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Feb 18 00:54:45 crc kubenswrapper[4791]: I0218 00:54:45.895504 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Feb 18 00:54:45 crc kubenswrapper[4791]: I0218 00:54:45.897786 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-5rbh6" Feb 18 00:54:45 crc kubenswrapper[4791]: I0218 00:54:45.899147 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Feb 18 00:54:45 crc kubenswrapper[4791]: I0218 00:54:45.899362 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Feb 18 00:54:45 crc kubenswrapper[4791]: I0218 00:54:45.899486 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Feb 18 00:54:45 crc kubenswrapper[4791]: I0218 00:54:45.900452 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Feb 18 00:54:45 crc kubenswrapper[4791]: I0218 00:54:45.919762 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Feb 18 00:54:46 crc kubenswrapper[4791]: I0218 00:54:46.002846 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9e1b835-c4bf-4722-b4f5-512b3439fdfb-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"f9e1b835-c4bf-4722-b4f5-512b3439fdfb\") " pod="openstack/openstack-galera-0" Feb 18 00:54:46 crc kubenswrapper[4791]: I0218 00:54:46.002899 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/f9e1b835-c4bf-4722-b4f5-512b3439fdfb-config-data-default\") pod \"openstack-galera-0\" (UID: \"f9e1b835-c4bf-4722-b4f5-512b3439fdfb\") " pod="openstack/openstack-galera-0" Feb 18 00:54:46 crc kubenswrapper[4791]: I0218 00:54:46.002917 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f9e1b835-c4bf-4722-b4f5-512b3439fdfb-operator-scripts\") pod \"openstack-galera-0\" (UID: \"f9e1b835-c4bf-4722-b4f5-512b3439fdfb\") " pod="openstack/openstack-galera-0" Feb 18 00:54:46 crc kubenswrapper[4791]: I0218 00:54:46.003143 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9e1b835-c4bf-4722-b4f5-512b3439fdfb-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"f9e1b835-c4bf-4722-b4f5-512b3439fdfb\") " pod="openstack/openstack-galera-0" Feb 18 00:54:46 crc kubenswrapper[4791]: I0218 00:54:46.003248 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/f9e1b835-c4bf-4722-b4f5-512b3439fdfb-config-data-generated\") pod \"openstack-galera-0\" (UID: \"f9e1b835-c4bf-4722-b4f5-512b3439fdfb\") " pod="openstack/openstack-galera-0" Feb 18 00:54:46 crc kubenswrapper[4791]: I0218 00:54:46.003416 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f9e1b835-c4bf-4722-b4f5-512b3439fdfb-kolla-config\") pod \"openstack-galera-0\" (UID: \"f9e1b835-c4bf-4722-b4f5-512b3439fdfb\") " pod="openstack/openstack-galera-0" Feb 18 00:54:46 crc kubenswrapper[4791]: I0218 00:54:46.003539 4791 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hncm9\" (UniqueName: \"kubernetes.io/projected/f9e1b835-c4bf-4722-b4f5-512b3439fdfb-kube-api-access-hncm9\") pod \"openstack-galera-0\" (UID: \"f9e1b835-c4bf-4722-b4f5-512b3439fdfb\") " pod="openstack/openstack-galera-0" Feb 18 00:54:46 crc kubenswrapper[4791]: I0218 00:54:46.003567 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-622fa2f2-e27f-4f7c-84ae-b97de8e0bcd2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-622fa2f2-e27f-4f7c-84ae-b97de8e0bcd2\") pod \"openstack-galera-0\" (UID: \"f9e1b835-c4bf-4722-b4f5-512b3439fdfb\") " pod="openstack/openstack-galera-0" Feb 18 00:54:46 crc kubenswrapper[4791]: I0218 00:54:46.104922 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9e1b835-c4bf-4722-b4f5-512b3439fdfb-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"f9e1b835-c4bf-4722-b4f5-512b3439fdfb\") " pod="openstack/openstack-galera-0" Feb 18 00:54:46 crc kubenswrapper[4791]: I0218 00:54:46.104983 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/f9e1b835-c4bf-4722-b4f5-512b3439fdfb-config-data-default\") pod \"openstack-galera-0\" (UID: \"f9e1b835-c4bf-4722-b4f5-512b3439fdfb\") " pod="openstack/openstack-galera-0" Feb 18 00:54:46 crc kubenswrapper[4791]: I0218 00:54:46.104998 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f9e1b835-c4bf-4722-b4f5-512b3439fdfb-operator-scripts\") pod \"openstack-galera-0\" (UID: \"f9e1b835-c4bf-4722-b4f5-512b3439fdfb\") " pod="openstack/openstack-galera-0" Feb 18 00:54:46 crc kubenswrapper[4791]: I0218 00:54:46.105035 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9e1b835-c4bf-4722-b4f5-512b3439fdfb-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"f9e1b835-c4bf-4722-b4f5-512b3439fdfb\") " pod="openstack/openstack-galera-0" Feb 18 00:54:46 crc kubenswrapper[4791]: I0218 00:54:46.105059 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/f9e1b835-c4bf-4722-b4f5-512b3439fdfb-config-data-generated\") pod \"openstack-galera-0\" (UID: \"f9e1b835-c4bf-4722-b4f5-512b3439fdfb\") " pod="openstack/openstack-galera-0" Feb 18 00:54:46 crc kubenswrapper[4791]: I0218 00:54:46.105104 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f9e1b835-c4bf-4722-b4f5-512b3439fdfb-kolla-config\") pod \"openstack-galera-0\" (UID: \"f9e1b835-c4bf-4722-b4f5-512b3439fdfb\") " pod="openstack/openstack-galera-0" Feb 18 00:54:46 crc kubenswrapper[4791]: I0218 00:54:46.105149 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hncm9\" (UniqueName: \"kubernetes.io/projected/f9e1b835-c4bf-4722-b4f5-512b3439fdfb-kube-api-access-hncm9\") pod \"openstack-galera-0\" (UID: \"f9e1b835-c4bf-4722-b4f5-512b3439fdfb\") " pod="openstack/openstack-galera-0" Feb 18 00:54:46 crc kubenswrapper[4791]: I0218 00:54:46.105228 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-622fa2f2-e27f-4f7c-84ae-b97de8e0bcd2\" 
(UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-622fa2f2-e27f-4f7c-84ae-b97de8e0bcd2\") pod \"openstack-galera-0\" (UID: \"f9e1b835-c4bf-4722-b4f5-512b3439fdfb\") " pod="openstack/openstack-galera-0" Feb 18 00:54:46 crc kubenswrapper[4791]: I0218 00:54:46.106427 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/f9e1b835-c4bf-4722-b4f5-512b3439fdfb-config-data-generated\") pod \"openstack-galera-0\" (UID: \"f9e1b835-c4bf-4722-b4f5-512b3439fdfb\") " pod="openstack/openstack-galera-0" Feb 18 00:54:46 crc kubenswrapper[4791]: I0218 00:54:46.117527 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/f9e1b835-c4bf-4722-b4f5-512b3439fdfb-config-data-default\") pod \"openstack-galera-0\" (UID: \"f9e1b835-c4bf-4722-b4f5-512b3439fdfb\") " pod="openstack/openstack-galera-0" Feb 18 00:54:46 crc kubenswrapper[4791]: I0218 00:54:46.117650 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f9e1b835-c4bf-4722-b4f5-512b3439fdfb-kolla-config\") pod \"openstack-galera-0\" (UID: \"f9e1b835-c4bf-4722-b4f5-512b3439fdfb\") " pod="openstack/openstack-galera-0" Feb 18 00:54:46 crc kubenswrapper[4791]: I0218 00:54:46.118136 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f9e1b835-c4bf-4722-b4f5-512b3439fdfb-operator-scripts\") pod \"openstack-galera-0\" (UID: \"f9e1b835-c4bf-4722-b4f5-512b3439fdfb\") " pod="openstack/openstack-galera-0" Feb 18 00:54:46 crc kubenswrapper[4791]: I0218 00:54:46.122732 4791 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Feb 18 00:54:46 crc kubenswrapper[4791]: I0218 00:54:46.122784 4791 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-622fa2f2-e27f-4f7c-84ae-b97de8e0bcd2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-622fa2f2-e27f-4f7c-84ae-b97de8e0bcd2\") pod \"openstack-galera-0\" (UID: \"f9e1b835-c4bf-4722-b4f5-512b3439fdfb\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1acd4915a16451202531162b3c31b683caf7a27edded203858d1c76def2af774/globalmount\"" pod="openstack/openstack-galera-0" Feb 18 00:54:46 crc kubenswrapper[4791]: I0218 00:54:46.123185 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9e1b835-c4bf-4722-b4f5-512b3439fdfb-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"f9e1b835-c4bf-4722-b4f5-512b3439fdfb\") " pod="openstack/openstack-galera-0" Feb 18 00:54:46 crc kubenswrapper[4791]: I0218 00:54:46.126268 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9e1b835-c4bf-4722-b4f5-512b3439fdfb-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"f9e1b835-c4bf-4722-b4f5-512b3439fdfb\") " pod="openstack/openstack-galera-0" Feb 18 00:54:46 crc kubenswrapper[4791]: I0218 00:54:46.140877 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hncm9\" (UniqueName: \"kubernetes.io/projected/f9e1b835-c4bf-4722-b4f5-512b3439fdfb-kube-api-access-hncm9\") pod \"openstack-galera-0\" (UID: \"f9e1b835-c4bf-4722-b4f5-512b3439fdfb\") " pod="openstack/openstack-galera-0" Feb 18 00:54:46 crc kubenswrapper[4791]: I0218 00:54:46.197177 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-622fa2f2-e27f-4f7c-84ae-b97de8e0bcd2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-622fa2f2-e27f-4f7c-84ae-b97de8e0bcd2\") pod \"openstack-galera-0\" (UID: \"f9e1b835-c4bf-4722-b4f5-512b3439fdfb\") " pod="openstack/openstack-galera-0" Feb 18 00:54:46 crc kubenswrapper[4791]: I0218 00:54:46.233330 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.203698 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.205853 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.208983 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-5kt85" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.209247 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.209412 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.209523 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.221911 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.350907 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6w4z6\" (UniqueName: \"kubernetes.io/projected/af0c20ed-84d0-4674-8664-1de72a190f84-kube-api-access-6w4z6\") pod \"openstack-cell1-galera-0\" (UID: \"af0c20ed-84d0-4674-8664-1de72a190f84\") " pod="openstack/openstack-cell1-galera-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.351231 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af0c20ed-84d0-4674-8664-1de72a190f84-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"af0c20ed-84d0-4674-8664-1de72a190f84\") " pod="openstack/openstack-cell1-galera-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.351316 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-d185a328-501a-42ac-b5f5-15e9ad9cf388\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d185a328-501a-42ac-b5f5-15e9ad9cf388\") pod \"openstack-cell1-galera-0\" (UID: \"af0c20ed-84d0-4674-8664-1de72a190f84\") " pod="openstack/openstack-cell1-galera-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.351446 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/af0c20ed-84d0-4674-8664-1de72a190f84-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"af0c20ed-84d0-4674-8664-1de72a190f84\") " pod="openstack/openstack-cell1-galera-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.351472 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af0c20ed-84d0-4674-8664-1de72a190f84-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"af0c20ed-84d0-4674-8664-1de72a190f84\") " pod="openstack/openstack-cell1-galera-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.351731 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/af0c20ed-84d0-4674-8664-1de72a190f84-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"af0c20ed-84d0-4674-8664-1de72a190f84\") " pod="openstack/openstack-cell1-galera-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.351765 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/af0c20ed-84d0-4674-8664-1de72a190f84-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"af0c20ed-84d0-4674-8664-1de72a190f84\") " pod="openstack/openstack-cell1-galera-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.351877 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/af0c20ed-84d0-4674-8664-1de72a190f84-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"af0c20ed-84d0-4674-8664-1de72a190f84\") " pod="openstack/openstack-cell1-galera-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.445729 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.447344 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.449072 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.449195 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.449507 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-wjrrs" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.453138 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/af0c20ed-84d0-4674-8664-1de72a190f84-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"af0c20ed-84d0-4674-8664-1de72a190f84\") " pod="openstack/openstack-cell1-galera-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.453241 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6w4z6\" (UniqueName: \"kubernetes.io/projected/af0c20ed-84d0-4674-8664-1de72a190f84-kube-api-access-6w4z6\") pod \"openstack-cell1-galera-0\" (UID: \"af0c20ed-84d0-4674-8664-1de72a190f84\") " pod="openstack/openstack-cell1-galera-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.453291 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af0c20ed-84d0-4674-8664-1de72a190f84-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"af0c20ed-84d0-4674-8664-1de72a190f84\") " pod="openstack/openstack-cell1-galera-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.453350 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-d185a328-501a-42ac-b5f5-15e9ad9cf388\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d185a328-501a-42ac-b5f5-15e9ad9cf388\") pod \"openstack-cell1-galera-0\" (UID: \"af0c20ed-84d0-4674-8664-1de72a190f84\") " pod="openstack/openstack-cell1-galera-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.453379 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/af0c20ed-84d0-4674-8664-1de72a190f84-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"af0c20ed-84d0-4674-8664-1de72a190f84\") " pod="openstack/openstack-cell1-galera-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.453403 4791 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af0c20ed-84d0-4674-8664-1de72a190f84-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"af0c20ed-84d0-4674-8664-1de72a190f84\") " pod="openstack/openstack-cell1-galera-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.453456 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/af0c20ed-84d0-4674-8664-1de72a190f84-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"af0c20ed-84d0-4674-8664-1de72a190f84\") " pod="openstack/openstack-cell1-galera-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.453481 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/af0c20ed-84d0-4674-8664-1de72a190f84-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"af0c20ed-84d0-4674-8664-1de72a190f84\") " pod="openstack/openstack-cell1-galera-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.454330 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/af0c20ed-84d0-4674-8664-1de72a190f84-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"af0c20ed-84d0-4674-8664-1de72a190f84\") " pod="openstack/openstack-cell1-galera-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.454478 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/af0c20ed-84d0-4674-8664-1de72a190f84-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"af0c20ed-84d0-4674-8664-1de72a190f84\") " pod="openstack/openstack-cell1-galera-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.454747 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/af0c20ed-84d0-4674-8664-1de72a190f84-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"af0c20ed-84d0-4674-8664-1de72a190f84\") " pod="openstack/openstack-cell1-galera-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.455122 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af0c20ed-84d0-4674-8664-1de72a190f84-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"af0c20ed-84d0-4674-8664-1de72a190f84\") " pod="openstack/openstack-cell1-galera-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.457863 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af0c20ed-84d0-4674-8664-1de72a190f84-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"af0c20ed-84d0-4674-8664-1de72a190f84\") " pod="openstack/openstack-cell1-galera-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.463939 4791 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.463975 4791 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-d185a328-501a-42ac-b5f5-15e9ad9cf388\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d185a328-501a-42ac-b5f5-15e9ad9cf388\") pod \"openstack-cell1-galera-0\" (UID: \"af0c20ed-84d0-4674-8664-1de72a190f84\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/716a5ef8cdc699d860124f2138d13ce1c7dd36d2bf69bf780639f73a4a5a9e5c/globalmount\"" pod="openstack/openstack-cell1-galera-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.467346 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/af0c20ed-84d0-4674-8664-1de72a190f84-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"af0c20ed-84d0-4674-8664-1de72a190f84\") " pod="openstack/openstack-cell1-galera-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.475395 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.482856 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6w4z6\" (UniqueName: \"kubernetes.io/projected/af0c20ed-84d0-4674-8664-1de72a190f84-kube-api-access-6w4z6\") pod \"openstack-cell1-galera-0\" (UID: \"af0c20ed-84d0-4674-8664-1de72a190f84\") " pod="openstack/openstack-cell1-galera-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.521443 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-d185a328-501a-42ac-b5f5-15e9ad9cf388\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d185a328-501a-42ac-b5f5-15e9ad9cf388\") pod \"openstack-cell1-galera-0\" (UID: \"af0c20ed-84d0-4674-8664-1de72a190f84\") " pod="openstack/openstack-cell1-galera-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.555442 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3e07d33-a963-4a49-b9f4-eb2b867eae6a-combined-ca-bundle\") pod \"memcached-0\" (UID: \"e3e07d33-a963-4a49-b9f4-eb2b867eae6a\") " pod="openstack/memcached-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.555528 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qbfl8\" (UniqueName: \"kubernetes.io/projected/e3e07d33-a963-4a49-b9f4-eb2b867eae6a-kube-api-access-qbfl8\") pod \"memcached-0\" (UID: \"e3e07d33-a963-4a49-b9f4-eb2b867eae6a\") " pod="openstack/memcached-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.555563 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/e3e07d33-a963-4a49-b9f4-eb2b867eae6a-kolla-config\") pod \"memcached-0\" (UID: \"e3e07d33-a963-4a49-b9f4-eb2b867eae6a\") " pod="openstack/memcached-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.555600 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3e07d33-a963-4a49-b9f4-eb2b867eae6a-memcached-tls-certs\") pod \"memcached-0\" (UID: \"e3e07d33-a963-4a49-b9f4-eb2b867eae6a\") " pod="openstack/memcached-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.555647 4791 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e3e07d33-a963-4a49-b9f4-eb2b867eae6a-config-data\") pod \"memcached-0\" (UID: \"e3e07d33-a963-4a49-b9f4-eb2b867eae6a\") " pod="openstack/memcached-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.657715 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3e07d33-a963-4a49-b9f4-eb2b867eae6a-combined-ca-bundle\") pod \"memcached-0\" (UID: \"e3e07d33-a963-4a49-b9f4-eb2b867eae6a\") " pod="openstack/memcached-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.657807 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qbfl8\" (UniqueName: \"kubernetes.io/projected/e3e07d33-a963-4a49-b9f4-eb2b867eae6a-kube-api-access-qbfl8\") pod \"memcached-0\" (UID: \"e3e07d33-a963-4a49-b9f4-eb2b867eae6a\") " pod="openstack/memcached-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.657842 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/e3e07d33-a963-4a49-b9f4-eb2b867eae6a-kolla-config\") pod \"memcached-0\" (UID: \"e3e07d33-a963-4a49-b9f4-eb2b867eae6a\") " pod="openstack/memcached-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.657880 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3e07d33-a963-4a49-b9f4-eb2b867eae6a-memcached-tls-certs\") pod \"memcached-0\" (UID: \"e3e07d33-a963-4a49-b9f4-eb2b867eae6a\") " pod="openstack/memcached-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.657926 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e3e07d33-a963-4a49-b9f4-eb2b867eae6a-config-data\") pod \"memcached-0\" (UID: \"e3e07d33-a963-4a49-b9f4-eb2b867eae6a\") " pod="openstack/memcached-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.658648 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e3e07d33-a963-4a49-b9f4-eb2b867eae6a-config-data\") pod \"memcached-0\" (UID: \"e3e07d33-a963-4a49-b9f4-eb2b867eae6a\") " pod="openstack/memcached-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.662137 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3e07d33-a963-4a49-b9f4-eb2b867eae6a-combined-ca-bundle\") pod \"memcached-0\" (UID: \"e3e07d33-a963-4a49-b9f4-eb2b867eae6a\") " pod="openstack/memcached-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.664239 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/e3e07d33-a963-4a49-b9f4-eb2b867eae6a-kolla-config\") pod \"memcached-0\" (UID: \"e3e07d33-a963-4a49-b9f4-eb2b867eae6a\") " pod="openstack/memcached-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.668627 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3e07d33-a963-4a49-b9f4-eb2b867eae6a-memcached-tls-certs\") pod \"memcached-0\" (UID: \"e3e07d33-a963-4a49-b9f4-eb2b867eae6a\") " pod="openstack/memcached-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.677809 4791 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-qbfl8\" (UniqueName: \"kubernetes.io/projected/e3e07d33-a963-4a49-b9f4-eb2b867eae6a-kube-api-access-qbfl8\") pod \"memcached-0\" (UID: \"e3e07d33-a963-4a49-b9f4-eb2b867eae6a\") " pod="openstack/memcached-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.823464 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Feb 18 00:54:47 crc kubenswrapper[4791]: I0218 00:54:47.833831 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Feb 18 00:54:49 crc kubenswrapper[4791]: I0218 00:54:49.958624 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Feb 18 00:54:49 crc kubenswrapper[4791]: I0218 00:54:49.960309 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Feb 18 00:54:49 crc kubenswrapper[4791]: I0218 00:54:49.962600 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-j8c4p" Feb 18 00:54:50 crc kubenswrapper[4791]: I0218 00:54:49.998327 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Feb 18 00:54:50 crc kubenswrapper[4791]: I0218 00:54:50.112914 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jdgqh\" (UniqueName: \"kubernetes.io/projected/ccdd7e1f-2546-4f46-93cd-7e07d3db2182-kube-api-access-jdgqh\") pod \"kube-state-metrics-0\" (UID: \"ccdd7e1f-2546-4f46-93cd-7e07d3db2182\") " pod="openstack/kube-state-metrics-0" Feb 18 00:54:50 crc kubenswrapper[4791]: I0218 00:54:50.214640 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jdgqh\" (UniqueName: \"kubernetes.io/projected/ccdd7e1f-2546-4f46-93cd-7e07d3db2182-kube-api-access-jdgqh\") pod \"kube-state-metrics-0\" (UID: \"ccdd7e1f-2546-4f46-93cd-7e07d3db2182\") " pod="openstack/kube-state-metrics-0" Feb 18 00:54:50 crc kubenswrapper[4791]: I0218 00:54:50.244782 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jdgqh\" (UniqueName: \"kubernetes.io/projected/ccdd7e1f-2546-4f46-93cd-7e07d3db2182-kube-api-access-jdgqh\") pod \"kube-state-metrics-0\" (UID: \"ccdd7e1f-2546-4f46-93cd-7e07d3db2182\") " pod="openstack/kube-state-metrics-0" Feb 18 00:54:50 crc kubenswrapper[4791]: I0218 00:54:50.291480 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Feb 18 00:54:50 crc kubenswrapper[4791]: I0218 00:54:50.753806 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"10fd526e-f41c-4c4a-8e15-239cd3ac37da","Type":"ContainerStarted","Data":"4a1f9d9450293995e12f78d3d871e2be63f090c468a6fe6e1c99db1d9fb8b5ad"} Feb 18 00:54:50 crc kubenswrapper[4791]: I0218 00:54:50.833910 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-ui-dashboards-66cbf594b5-z78rw"] Feb 18 00:54:50 crc kubenswrapper[4791]: I0218 00:54:50.835261 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-ui-dashboards-66cbf594b5-z78rw" Feb 18 00:54:50 crc kubenswrapper[4791]: I0218 00:54:50.843206 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-ui-dashboards" Feb 18 00:54:50 crc kubenswrapper[4791]: I0218 00:54:50.843518 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-ui-dashboards-sa-dockercfg-zzsz4" Feb 18 00:54:50 crc kubenswrapper[4791]: I0218 00:54:50.853138 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-ui-dashboards-66cbf594b5-z78rw"] Feb 18 00:54:50 crc kubenswrapper[4791]: I0218 00:54:50.861215 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t7p5f\" (UniqueName: \"kubernetes.io/projected/617ebaca-caf6-45cf-92b1-f1bb067bf2f1-kube-api-access-t7p5f\") pod \"observability-ui-dashboards-66cbf594b5-z78rw\" (UID: \"617ebaca-caf6-45cf-92b1-f1bb067bf2f1\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-z78rw" Feb 18 00:54:50 crc kubenswrapper[4791]: I0218 00:54:50.861979 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/617ebaca-caf6-45cf-92b1-f1bb067bf2f1-serving-cert\") pod \"observability-ui-dashboards-66cbf594b5-z78rw\" (UID: \"617ebaca-caf6-45cf-92b1-f1bb067bf2f1\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-z78rw" Feb 18 00:54:50 crc kubenswrapper[4791]: I0218 00:54:50.962712 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/617ebaca-caf6-45cf-92b1-f1bb067bf2f1-serving-cert\") pod \"observability-ui-dashboards-66cbf594b5-z78rw\" (UID: \"617ebaca-caf6-45cf-92b1-f1bb067bf2f1\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-z78rw" Feb 18 00:54:50 crc kubenswrapper[4791]: I0218 00:54:50.962847 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t7p5f\" (UniqueName: \"kubernetes.io/projected/617ebaca-caf6-45cf-92b1-f1bb067bf2f1-kube-api-access-t7p5f\") pod \"observability-ui-dashboards-66cbf594b5-z78rw\" (UID: \"617ebaca-caf6-45cf-92b1-f1bb067bf2f1\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-z78rw" Feb 18 00:54:50 crc kubenswrapper[4791]: E0218 00:54:50.963241 4791 secret.go:188] Couldn't get secret openshift-operators/observability-ui-dashboards: secret "observability-ui-dashboards" not found Feb 18 00:54:50 crc kubenswrapper[4791]: E0218 00:54:50.963287 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/617ebaca-caf6-45cf-92b1-f1bb067bf2f1-serving-cert podName:617ebaca-caf6-45cf-92b1-f1bb067bf2f1 nodeName:}" failed. No retries permitted until 2026-02-18 00:54:51.463270487 +0000 UTC m=+1233.031283647 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/617ebaca-caf6-45cf-92b1-f1bb067bf2f1-serving-cert") pod "observability-ui-dashboards-66cbf594b5-z78rw" (UID: "617ebaca-caf6-45cf-92b1-f1bb067bf2f1") : secret "observability-ui-dashboards" not found Feb 18 00:54:50 crc kubenswrapper[4791]: I0218 00:54:50.993932 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t7p5f\" (UniqueName: \"kubernetes.io/projected/617ebaca-caf6-45cf-92b1-f1bb067bf2f1-kube-api-access-t7p5f\") pod \"observability-ui-dashboards-66cbf594b5-z78rw\" (UID: \"617ebaca-caf6-45cf-92b1-f1bb067bf2f1\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-z78rw" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.214413 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-76c748cbff-8c4wc"] Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.215931 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-76c748cbff-8c4wc" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.253774 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-76c748cbff-8c4wc"] Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.271122 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/03480f27-1247-4293-9725-bd33cb3f7841-console-oauth-config\") pod \"console-76c748cbff-8c4wc\" (UID: \"03480f27-1247-4293-9725-bd33cb3f7841\") " pod="openshift-console/console-76c748cbff-8c4wc" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.271200 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jrm44\" (UniqueName: \"kubernetes.io/projected/03480f27-1247-4293-9725-bd33cb3f7841-kube-api-access-jrm44\") pod \"console-76c748cbff-8c4wc\" (UID: \"03480f27-1247-4293-9725-bd33cb3f7841\") " pod="openshift-console/console-76c748cbff-8c4wc" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.271239 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/03480f27-1247-4293-9725-bd33cb3f7841-oauth-serving-cert\") pod \"console-76c748cbff-8c4wc\" (UID: \"03480f27-1247-4293-9725-bd33cb3f7841\") " pod="openshift-console/console-76c748cbff-8c4wc" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.271267 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/03480f27-1247-4293-9725-bd33cb3f7841-console-serving-cert\") pod \"console-76c748cbff-8c4wc\" (UID: \"03480f27-1247-4293-9725-bd33cb3f7841\") " pod="openshift-console/console-76c748cbff-8c4wc" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.271356 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/03480f27-1247-4293-9725-bd33cb3f7841-trusted-ca-bundle\") pod \"console-76c748cbff-8c4wc\" (UID: \"03480f27-1247-4293-9725-bd33cb3f7841\") " pod="openshift-console/console-76c748cbff-8c4wc" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.271381 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: 
\"kubernetes.io/configmap/03480f27-1247-4293-9725-bd33cb3f7841-console-config\") pod \"console-76c748cbff-8c4wc\" (UID: \"03480f27-1247-4293-9725-bd33cb3f7841\") " pod="openshift-console/console-76c748cbff-8c4wc" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.271411 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/03480f27-1247-4293-9725-bd33cb3f7841-service-ca\") pod \"console-76c748cbff-8c4wc\" (UID: \"03480f27-1247-4293-9725-bd33cb3f7841\") " pod="openshift-console/console-76c748cbff-8c4wc" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.325428 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.327727 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.336069 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.344069 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.345483 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.348323 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.348424 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.348496 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-g9qqq" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.348557 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-1" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.349616 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.349705 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-2" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.373715 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/03480f27-1247-4293-9725-bd33cb3f7841-service-ca\") pod \"console-76c748cbff-8c4wc\" (UID: \"03480f27-1247-4293-9725-bd33cb3f7841\") " pod="openshift-console/console-76c748cbff-8c4wc" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.374024 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/03480f27-1247-4293-9725-bd33cb3f7841-console-oauth-config\") pod \"console-76c748cbff-8c4wc\" (UID: \"03480f27-1247-4293-9725-bd33cb3f7841\") " pod="openshift-console/console-76c748cbff-8c4wc" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.374181 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jrm44\" 
(UniqueName: \"kubernetes.io/projected/03480f27-1247-4293-9725-bd33cb3f7841-kube-api-access-jrm44\") pod \"console-76c748cbff-8c4wc\" (UID: \"03480f27-1247-4293-9725-bd33cb3f7841\") " pod="openshift-console/console-76c748cbff-8c4wc" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.374356 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/03480f27-1247-4293-9725-bd33cb3f7841-oauth-serving-cert\") pod \"console-76c748cbff-8c4wc\" (UID: \"03480f27-1247-4293-9725-bd33cb3f7841\") " pod="openshift-console/console-76c748cbff-8c4wc" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.374490 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/03480f27-1247-4293-9725-bd33cb3f7841-console-serving-cert\") pod \"console-76c748cbff-8c4wc\" (UID: \"03480f27-1247-4293-9725-bd33cb3f7841\") " pod="openshift-console/console-76c748cbff-8c4wc" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.374694 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/03480f27-1247-4293-9725-bd33cb3f7841-trusted-ca-bundle\") pod \"console-76c748cbff-8c4wc\" (UID: \"03480f27-1247-4293-9725-bd33cb3f7841\") " pod="openshift-console/console-76c748cbff-8c4wc" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.374787 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/03480f27-1247-4293-9725-bd33cb3f7841-console-config\") pod \"console-76c748cbff-8c4wc\" (UID: \"03480f27-1247-4293-9725-bd33cb3f7841\") " pod="openshift-console/console-76c748cbff-8c4wc" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.375831 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/03480f27-1247-4293-9725-bd33cb3f7841-service-ca\") pod \"console-76c748cbff-8c4wc\" (UID: \"03480f27-1247-4293-9725-bd33cb3f7841\") " pod="openshift-console/console-76c748cbff-8c4wc" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.388909 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/03480f27-1247-4293-9725-bd33cb3f7841-oauth-serving-cert\") pod \"console-76c748cbff-8c4wc\" (UID: \"03480f27-1247-4293-9725-bd33cb3f7841\") " pod="openshift-console/console-76c748cbff-8c4wc" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.392946 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/03480f27-1247-4293-9725-bd33cb3f7841-console-config\") pod \"console-76c748cbff-8c4wc\" (UID: \"03480f27-1247-4293-9725-bd33cb3f7841\") " pod="openshift-console/console-76c748cbff-8c4wc" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.393961 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/03480f27-1247-4293-9725-bd33cb3f7841-trusted-ca-bundle\") pod \"console-76c748cbff-8c4wc\" (UID: \"03480f27-1247-4293-9725-bd33cb3f7841\") " pod="openshift-console/console-76c748cbff-8c4wc" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.395767 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: 
\"kubernetes.io/secret/03480f27-1247-4293-9725-bd33cb3f7841-console-oauth-config\") pod \"console-76c748cbff-8c4wc\" (UID: \"03480f27-1247-4293-9725-bd33cb3f7841\") " pod="openshift-console/console-76c748cbff-8c4wc" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.399845 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/03480f27-1247-4293-9725-bd33cb3f7841-console-serving-cert\") pod \"console-76c748cbff-8c4wc\" (UID: \"03480f27-1247-4293-9725-bd33cb3f7841\") " pod="openshift-console/console-76c748cbff-8c4wc" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.456465 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jrm44\" (UniqueName: \"kubernetes.io/projected/03480f27-1247-4293-9725-bd33cb3f7841-kube-api-access-jrm44\") pod \"console-76c748cbff-8c4wc\" (UID: \"03480f27-1247-4293-9725-bd33cb3f7841\") " pod="openshift-console/console-76c748cbff-8c4wc" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.476769 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-6ba21e8a-249c-4f45-bb7e-667bfaff725e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6ba21e8a-249c-4f45-bb7e-667bfaff725e\") pod \"prometheus-metric-storage-0\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.476829 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1c3406dc-1b5d-4376-8b80-b55720c15091-config\") pod \"prometheus-metric-storage-0\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.476859 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/1c3406dc-1b5d-4376-8b80-b55720c15091-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.476890 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6z67q\" (UniqueName: \"kubernetes.io/projected/1c3406dc-1b5d-4376-8b80-b55720c15091-kube-api-access-6z67q\") pod \"prometheus-metric-storage-0\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.476922 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/617ebaca-caf6-45cf-92b1-f1bb067bf2f1-serving-cert\") pod \"observability-ui-dashboards-66cbf594b5-z78rw\" (UID: \"617ebaca-caf6-45cf-92b1-f1bb067bf2f1\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-z78rw" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.476953 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/1c3406dc-1b5d-4376-8b80-b55720c15091-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:54:51 crc 
kubenswrapper[4791]: I0218 00:54:51.476987 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/1c3406dc-1b5d-4376-8b80-b55720c15091-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.477013 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/1c3406dc-1b5d-4376-8b80-b55720c15091-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.477040 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/1c3406dc-1b5d-4376-8b80-b55720c15091-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.477076 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/1c3406dc-1b5d-4376-8b80-b55720c15091-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.477093 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/1c3406dc-1b5d-4376-8b80-b55720c15091-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.479972 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/617ebaca-caf6-45cf-92b1-f1bb067bf2f1-serving-cert\") pod \"observability-ui-dashboards-66cbf594b5-z78rw\" (UID: \"617ebaca-caf6-45cf-92b1-f1bb067bf2f1\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-z78rw" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.530445 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-76c748cbff-8c4wc" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.578364 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/1c3406dc-1b5d-4376-8b80-b55720c15091-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.578430 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/1c3406dc-1b5d-4376-8b80-b55720c15091-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.578455 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/1c3406dc-1b5d-4376-8b80-b55720c15091-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.578500 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-6ba21e8a-249c-4f45-bb7e-667bfaff725e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6ba21e8a-249c-4f45-bb7e-667bfaff725e\") pod \"prometheus-metric-storage-0\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.578532 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1c3406dc-1b5d-4376-8b80-b55720c15091-config\") pod \"prometheus-metric-storage-0\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.578555 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/1c3406dc-1b5d-4376-8b80-b55720c15091-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.578582 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6z67q\" (UniqueName: \"kubernetes.io/projected/1c3406dc-1b5d-4376-8b80-b55720c15091-kube-api-access-6z67q\") pod \"prometheus-metric-storage-0\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.578613 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/1c3406dc-1b5d-4376-8b80-b55720c15091-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.578646 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: 
\"kubernetes.io/configmap/1c3406dc-1b5d-4376-8b80-b55720c15091-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.578672 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/1c3406dc-1b5d-4376-8b80-b55720c15091-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.579387 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/1c3406dc-1b5d-4376-8b80-b55720c15091-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.579746 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/1c3406dc-1b5d-4376-8b80-b55720c15091-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.579929 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/1c3406dc-1b5d-4376-8b80-b55720c15091-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.583881 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/1c3406dc-1b5d-4376-8b80-b55720c15091-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.584427 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/1c3406dc-1b5d-4376-8b80-b55720c15091-config\") pod \"prometheus-metric-storage-0\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.584706 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/1c3406dc-1b5d-4376-8b80-b55720c15091-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.585516 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/1c3406dc-1b5d-4376-8b80-b55720c15091-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.585558 4791 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/1c3406dc-1b5d-4376-8b80-b55720c15091-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.590043 4791 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.590086 4791 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-6ba21e8a-249c-4f45-bb7e-667bfaff725e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6ba21e8a-249c-4f45-bb7e-667bfaff725e\") pod \"prometheus-metric-storage-0\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/08956609b52980c426bf22e33256fb47c97baf3f2e3c40e37bcbe84538d50090/globalmount\"" pod="openstack/prometheus-metric-storage-0" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.596948 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6z67q\" (UniqueName: \"kubernetes.io/projected/1c3406dc-1b5d-4376-8b80-b55720c15091-kube-api-access-6z67q\") pod \"prometheus-metric-storage-0\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.635713 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-6ba21e8a-249c-4f45-bb7e-667bfaff725e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6ba21e8a-249c-4f45-bb7e-667bfaff725e\") pod \"prometheus-metric-storage-0\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.681591 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Feb 18 00:54:51 crc kubenswrapper[4791]: I0218 00:54:51.764226 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-ui-dashboards-66cbf594b5-z78rw" Feb 18 00:54:52 crc kubenswrapper[4791]: I0218 00:54:52.680557 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-5shlq"] Feb 18 00:54:52 crc kubenswrapper[4791]: I0218 00:54:52.682323 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-5shlq" Feb 18 00:54:52 crc kubenswrapper[4791]: I0218 00:54:52.687515 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Feb 18 00:54:52 crc kubenswrapper[4791]: I0218 00:54:52.687612 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-hhm8x" Feb 18 00:54:52 crc kubenswrapper[4791]: I0218 00:54:52.687808 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Feb 18 00:54:52 crc kubenswrapper[4791]: I0218 00:54:52.700683 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-5shlq"] Feb 18 00:54:52 crc kubenswrapper[4791]: I0218 00:54:52.721585 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-l4g2x"] Feb 18 00:54:52 crc kubenswrapper[4791]: I0218 00:54:52.738950 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-l4g2x" Feb 18 00:54:52 crc kubenswrapper[4791]: I0218 00:54:52.779091 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-l4g2x"] Feb 18 00:54:52 crc kubenswrapper[4791]: I0218 00:54:52.807614 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qbtvr\" (UniqueName: \"kubernetes.io/projected/54deb01e-caa1-4fe0-8bd0-d412c4d73210-kube-api-access-qbtvr\") pod \"ovn-controller-5shlq\" (UID: \"54deb01e-caa1-4fe0-8bd0-d412c4d73210\") " pod="openstack/ovn-controller-5shlq" Feb 18 00:54:52 crc kubenswrapper[4791]: I0218 00:54:52.807666 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/54deb01e-caa1-4fe0-8bd0-d412c4d73210-scripts\") pod \"ovn-controller-5shlq\" (UID: \"54deb01e-caa1-4fe0-8bd0-d412c4d73210\") " pod="openstack/ovn-controller-5shlq" Feb 18 00:54:52 crc kubenswrapper[4791]: I0218 00:54:52.807688 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/54deb01e-caa1-4fe0-8bd0-d412c4d73210-var-log-ovn\") pod \"ovn-controller-5shlq\" (UID: \"54deb01e-caa1-4fe0-8bd0-d412c4d73210\") " pod="openstack/ovn-controller-5shlq" Feb 18 00:54:52 crc kubenswrapper[4791]: I0218 00:54:52.807817 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/54deb01e-caa1-4fe0-8bd0-d412c4d73210-var-run-ovn\") pod \"ovn-controller-5shlq\" (UID: \"54deb01e-caa1-4fe0-8bd0-d412c4d73210\") " pod="openstack/ovn-controller-5shlq" Feb 18 00:54:52 crc kubenswrapper[4791]: I0218 00:54:52.807836 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/54deb01e-caa1-4fe0-8bd0-d412c4d73210-ovn-controller-tls-certs\") pod \"ovn-controller-5shlq\" (UID: \"54deb01e-caa1-4fe0-8bd0-d412c4d73210\") " pod="openstack/ovn-controller-5shlq" Feb 18 00:54:52 crc kubenswrapper[4791]: I0218 00:54:52.807852 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54deb01e-caa1-4fe0-8bd0-d412c4d73210-combined-ca-bundle\") pod \"ovn-controller-5shlq\" (UID: 
\"54deb01e-caa1-4fe0-8bd0-d412c4d73210\") " pod="openstack/ovn-controller-5shlq" Feb 18 00:54:52 crc kubenswrapper[4791]: I0218 00:54:52.807881 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/54deb01e-caa1-4fe0-8bd0-d412c4d73210-var-run\") pod \"ovn-controller-5shlq\" (UID: \"54deb01e-caa1-4fe0-8bd0-d412c4d73210\") " pod="openstack/ovn-controller-5shlq" Feb 18 00:54:52 crc kubenswrapper[4791]: I0218 00:54:52.914650 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/54deb01e-caa1-4fe0-8bd0-d412c4d73210-var-run\") pod \"ovn-controller-5shlq\" (UID: \"54deb01e-caa1-4fe0-8bd0-d412c4d73210\") " pod="openstack/ovn-controller-5shlq" Feb 18 00:54:52 crc kubenswrapper[4791]: I0218 00:54:52.914773 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/8ac723a9-6515-4a69-aca1-95e459bf2047-var-log\") pod \"ovn-controller-ovs-l4g2x\" (UID: \"8ac723a9-6515-4a69-aca1-95e459bf2047\") " pod="openstack/ovn-controller-ovs-l4g2x" Feb 18 00:54:52 crc kubenswrapper[4791]: I0218 00:54:52.914813 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5c6l7\" (UniqueName: \"kubernetes.io/projected/8ac723a9-6515-4a69-aca1-95e459bf2047-kube-api-access-5c6l7\") pod \"ovn-controller-ovs-l4g2x\" (UID: \"8ac723a9-6515-4a69-aca1-95e459bf2047\") " pod="openstack/ovn-controller-ovs-l4g2x" Feb 18 00:54:52 crc kubenswrapper[4791]: I0218 00:54:52.914850 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/8ac723a9-6515-4a69-aca1-95e459bf2047-etc-ovs\") pod \"ovn-controller-ovs-l4g2x\" (UID: \"8ac723a9-6515-4a69-aca1-95e459bf2047\") " pod="openstack/ovn-controller-ovs-l4g2x" Feb 18 00:54:52 crc kubenswrapper[4791]: I0218 00:54:52.915009 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qbtvr\" (UniqueName: \"kubernetes.io/projected/54deb01e-caa1-4fe0-8bd0-d412c4d73210-kube-api-access-qbtvr\") pod \"ovn-controller-5shlq\" (UID: \"54deb01e-caa1-4fe0-8bd0-d412c4d73210\") " pod="openstack/ovn-controller-5shlq" Feb 18 00:54:52 crc kubenswrapper[4791]: I0218 00:54:52.915087 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/54deb01e-caa1-4fe0-8bd0-d412c4d73210-var-log-ovn\") pod \"ovn-controller-5shlq\" (UID: \"54deb01e-caa1-4fe0-8bd0-d412c4d73210\") " pod="openstack/ovn-controller-5shlq" Feb 18 00:54:52 crc kubenswrapper[4791]: I0218 00:54:52.915111 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/54deb01e-caa1-4fe0-8bd0-d412c4d73210-scripts\") pod \"ovn-controller-5shlq\" (UID: \"54deb01e-caa1-4fe0-8bd0-d412c4d73210\") " pod="openstack/ovn-controller-5shlq" Feb 18 00:54:52 crc kubenswrapper[4791]: I0218 00:54:52.915151 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/8ac723a9-6515-4a69-aca1-95e459bf2047-var-lib\") pod \"ovn-controller-ovs-l4g2x\" (UID: \"8ac723a9-6515-4a69-aca1-95e459bf2047\") " pod="openstack/ovn-controller-ovs-l4g2x" Feb 18 00:54:52 crc kubenswrapper[4791]: I0218 
00:54:52.915343 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8ac723a9-6515-4a69-aca1-95e459bf2047-scripts\") pod \"ovn-controller-ovs-l4g2x\" (UID: \"8ac723a9-6515-4a69-aca1-95e459bf2047\") " pod="openstack/ovn-controller-ovs-l4g2x" Feb 18 00:54:52 crc kubenswrapper[4791]: I0218 00:54:52.915445 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/8ac723a9-6515-4a69-aca1-95e459bf2047-var-run\") pod \"ovn-controller-ovs-l4g2x\" (UID: \"8ac723a9-6515-4a69-aca1-95e459bf2047\") " pod="openstack/ovn-controller-ovs-l4g2x" Feb 18 00:54:52 crc kubenswrapper[4791]: I0218 00:54:52.915529 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/54deb01e-caa1-4fe0-8bd0-d412c4d73210-var-run-ovn\") pod \"ovn-controller-5shlq\" (UID: \"54deb01e-caa1-4fe0-8bd0-d412c4d73210\") " pod="openstack/ovn-controller-5shlq" Feb 18 00:54:52 crc kubenswrapper[4791]: I0218 00:54:52.915568 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/54deb01e-caa1-4fe0-8bd0-d412c4d73210-ovn-controller-tls-certs\") pod \"ovn-controller-5shlq\" (UID: \"54deb01e-caa1-4fe0-8bd0-d412c4d73210\") " pod="openstack/ovn-controller-5shlq" Feb 18 00:54:52 crc kubenswrapper[4791]: I0218 00:54:52.915588 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54deb01e-caa1-4fe0-8bd0-d412c4d73210-combined-ca-bundle\") pod \"ovn-controller-5shlq\" (UID: \"54deb01e-caa1-4fe0-8bd0-d412c4d73210\") " pod="openstack/ovn-controller-5shlq" Feb 18 00:54:52 crc kubenswrapper[4791]: I0218 00:54:52.915659 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/54deb01e-caa1-4fe0-8bd0-d412c4d73210-var-run-ovn\") pod \"ovn-controller-5shlq\" (UID: \"54deb01e-caa1-4fe0-8bd0-d412c4d73210\") " pod="openstack/ovn-controller-5shlq" Feb 18 00:54:52 crc kubenswrapper[4791]: I0218 00:54:52.915694 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/54deb01e-caa1-4fe0-8bd0-d412c4d73210-var-run\") pod \"ovn-controller-5shlq\" (UID: \"54deb01e-caa1-4fe0-8bd0-d412c4d73210\") " pod="openstack/ovn-controller-5shlq" Feb 18 00:54:52 crc kubenswrapper[4791]: I0218 00:54:52.915588 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/54deb01e-caa1-4fe0-8bd0-d412c4d73210-var-log-ovn\") pod \"ovn-controller-5shlq\" (UID: \"54deb01e-caa1-4fe0-8bd0-d412c4d73210\") " pod="openstack/ovn-controller-5shlq" Feb 18 00:54:52 crc kubenswrapper[4791]: I0218 00:54:52.917716 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/54deb01e-caa1-4fe0-8bd0-d412c4d73210-scripts\") pod \"ovn-controller-5shlq\" (UID: \"54deb01e-caa1-4fe0-8bd0-d412c4d73210\") " pod="openstack/ovn-controller-5shlq" Feb 18 00:54:52 crc kubenswrapper[4791]: I0218 00:54:52.919407 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/54deb01e-caa1-4fe0-8bd0-d412c4d73210-ovn-controller-tls-certs\") pod 
\"ovn-controller-5shlq\" (UID: \"54deb01e-caa1-4fe0-8bd0-d412c4d73210\") " pod="openstack/ovn-controller-5shlq" Feb 18 00:54:52 crc kubenswrapper[4791]: I0218 00:54:52.926051 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54deb01e-caa1-4fe0-8bd0-d412c4d73210-combined-ca-bundle\") pod \"ovn-controller-5shlq\" (UID: \"54deb01e-caa1-4fe0-8bd0-d412c4d73210\") " pod="openstack/ovn-controller-5shlq" Feb 18 00:54:52 crc kubenswrapper[4791]: I0218 00:54:52.934350 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qbtvr\" (UniqueName: \"kubernetes.io/projected/54deb01e-caa1-4fe0-8bd0-d412c4d73210-kube-api-access-qbtvr\") pod \"ovn-controller-5shlq\" (UID: \"54deb01e-caa1-4fe0-8bd0-d412c4d73210\") " pod="openstack/ovn-controller-5shlq" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.004642 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-5shlq" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.017404 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/8ac723a9-6515-4a69-aca1-95e459bf2047-var-log\") pod \"ovn-controller-ovs-l4g2x\" (UID: \"8ac723a9-6515-4a69-aca1-95e459bf2047\") " pod="openstack/ovn-controller-ovs-l4g2x" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.017460 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5c6l7\" (UniqueName: \"kubernetes.io/projected/8ac723a9-6515-4a69-aca1-95e459bf2047-kube-api-access-5c6l7\") pod \"ovn-controller-ovs-l4g2x\" (UID: \"8ac723a9-6515-4a69-aca1-95e459bf2047\") " pod="openstack/ovn-controller-ovs-l4g2x" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.017503 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/8ac723a9-6515-4a69-aca1-95e459bf2047-etc-ovs\") pod \"ovn-controller-ovs-l4g2x\" (UID: \"8ac723a9-6515-4a69-aca1-95e459bf2047\") " pod="openstack/ovn-controller-ovs-l4g2x" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.017549 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/8ac723a9-6515-4a69-aca1-95e459bf2047-var-lib\") pod \"ovn-controller-ovs-l4g2x\" (UID: \"8ac723a9-6515-4a69-aca1-95e459bf2047\") " pod="openstack/ovn-controller-ovs-l4g2x" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.017603 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8ac723a9-6515-4a69-aca1-95e459bf2047-scripts\") pod \"ovn-controller-ovs-l4g2x\" (UID: \"8ac723a9-6515-4a69-aca1-95e459bf2047\") " pod="openstack/ovn-controller-ovs-l4g2x" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.017627 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/8ac723a9-6515-4a69-aca1-95e459bf2047-var-run\") pod \"ovn-controller-ovs-l4g2x\" (UID: \"8ac723a9-6515-4a69-aca1-95e459bf2047\") " pod="openstack/ovn-controller-ovs-l4g2x" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.017687 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/8ac723a9-6515-4a69-aca1-95e459bf2047-var-log\") pod \"ovn-controller-ovs-l4g2x\" (UID: 
\"8ac723a9-6515-4a69-aca1-95e459bf2047\") " pod="openstack/ovn-controller-ovs-l4g2x" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.017742 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/8ac723a9-6515-4a69-aca1-95e459bf2047-var-run\") pod \"ovn-controller-ovs-l4g2x\" (UID: \"8ac723a9-6515-4a69-aca1-95e459bf2047\") " pod="openstack/ovn-controller-ovs-l4g2x" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.017920 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/8ac723a9-6515-4a69-aca1-95e459bf2047-etc-ovs\") pod \"ovn-controller-ovs-l4g2x\" (UID: \"8ac723a9-6515-4a69-aca1-95e459bf2047\") " pod="openstack/ovn-controller-ovs-l4g2x" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.018511 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/8ac723a9-6515-4a69-aca1-95e459bf2047-var-lib\") pod \"ovn-controller-ovs-l4g2x\" (UID: \"8ac723a9-6515-4a69-aca1-95e459bf2047\") " pod="openstack/ovn-controller-ovs-l4g2x" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.020020 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8ac723a9-6515-4a69-aca1-95e459bf2047-scripts\") pod \"ovn-controller-ovs-l4g2x\" (UID: \"8ac723a9-6515-4a69-aca1-95e459bf2047\") " pod="openstack/ovn-controller-ovs-l4g2x" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.036059 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5c6l7\" (UniqueName: \"kubernetes.io/projected/8ac723a9-6515-4a69-aca1-95e459bf2047-kube-api-access-5c6l7\") pod \"ovn-controller-ovs-l4g2x\" (UID: \"8ac723a9-6515-4a69-aca1-95e459bf2047\") " pod="openstack/ovn-controller-ovs-l4g2x" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.082720 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-l4g2x" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.334014 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.336492 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.339341 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.339360 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.339374 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.339379 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-ft8zh" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.339739 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.379287 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.427037 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/45f7687b-ba74-41f7-bb24-e34698bb35c4-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"45f7687b-ba74-41f7-bb24-e34698bb35c4\") " pod="openstack/ovsdbserver-nb-0" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.427308 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45f7687b-ba74-41f7-bb24-e34698bb35c4-config\") pod \"ovsdbserver-nb-0\" (UID: \"45f7687b-ba74-41f7-bb24-e34698bb35c4\") " pod="openstack/ovsdbserver-nb-0" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.427410 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/45f7687b-ba74-41f7-bb24-e34698bb35c4-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"45f7687b-ba74-41f7-bb24-e34698bb35c4\") " pod="openstack/ovsdbserver-nb-0" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.427455 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-15d93d52-1b2f-42bb-8fea-aa1331048275\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-15d93d52-1b2f-42bb-8fea-aa1331048275\") pod \"ovsdbserver-nb-0\" (UID: \"45f7687b-ba74-41f7-bb24-e34698bb35c4\") " pod="openstack/ovsdbserver-nb-0" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.427488 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/45f7687b-ba74-41f7-bb24-e34698bb35c4-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"45f7687b-ba74-41f7-bb24-e34698bb35c4\") " pod="openstack/ovsdbserver-nb-0" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.427699 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/45f7687b-ba74-41f7-bb24-e34698bb35c4-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"45f7687b-ba74-41f7-bb24-e34698bb35c4\") " pod="openstack/ovsdbserver-nb-0" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.427765 4791 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s4rxh\" (UniqueName: \"kubernetes.io/projected/45f7687b-ba74-41f7-bb24-e34698bb35c4-kube-api-access-s4rxh\") pod \"ovsdbserver-nb-0\" (UID: \"45f7687b-ba74-41f7-bb24-e34698bb35c4\") " pod="openstack/ovsdbserver-nb-0" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.427795 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45f7687b-ba74-41f7-bb24-e34698bb35c4-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"45f7687b-ba74-41f7-bb24-e34698bb35c4\") " pod="openstack/ovsdbserver-nb-0" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.529689 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/45f7687b-ba74-41f7-bb24-e34698bb35c4-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"45f7687b-ba74-41f7-bb24-e34698bb35c4\") " pod="openstack/ovsdbserver-nb-0" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.529769 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45f7687b-ba74-41f7-bb24-e34698bb35c4-config\") pod \"ovsdbserver-nb-0\" (UID: \"45f7687b-ba74-41f7-bb24-e34698bb35c4\") " pod="openstack/ovsdbserver-nb-0" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.529842 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/45f7687b-ba74-41f7-bb24-e34698bb35c4-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"45f7687b-ba74-41f7-bb24-e34698bb35c4\") " pod="openstack/ovsdbserver-nb-0" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.529884 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-15d93d52-1b2f-42bb-8fea-aa1331048275\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-15d93d52-1b2f-42bb-8fea-aa1331048275\") pod \"ovsdbserver-nb-0\" (UID: \"45f7687b-ba74-41f7-bb24-e34698bb35c4\") " pod="openstack/ovsdbserver-nb-0" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.529910 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/45f7687b-ba74-41f7-bb24-e34698bb35c4-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"45f7687b-ba74-41f7-bb24-e34698bb35c4\") " pod="openstack/ovsdbserver-nb-0" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.529929 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/45f7687b-ba74-41f7-bb24-e34698bb35c4-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"45f7687b-ba74-41f7-bb24-e34698bb35c4\") " pod="openstack/ovsdbserver-nb-0" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.529959 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s4rxh\" (UniqueName: \"kubernetes.io/projected/45f7687b-ba74-41f7-bb24-e34698bb35c4-kube-api-access-s4rxh\") pod \"ovsdbserver-nb-0\" (UID: \"45f7687b-ba74-41f7-bb24-e34698bb35c4\") " pod="openstack/ovsdbserver-nb-0" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.529980 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/45f7687b-ba74-41f7-bb24-e34698bb35c4-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"45f7687b-ba74-41f7-bb24-e34698bb35c4\") " pod="openstack/ovsdbserver-nb-0" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.531188 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/45f7687b-ba74-41f7-bb24-e34698bb35c4-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"45f7687b-ba74-41f7-bb24-e34698bb35c4\") " pod="openstack/ovsdbserver-nb-0" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.531420 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45f7687b-ba74-41f7-bb24-e34698bb35c4-config\") pod \"ovsdbserver-nb-0\" (UID: \"45f7687b-ba74-41f7-bb24-e34698bb35c4\") " pod="openstack/ovsdbserver-nb-0" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.532804 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/45f7687b-ba74-41f7-bb24-e34698bb35c4-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"45f7687b-ba74-41f7-bb24-e34698bb35c4\") " pod="openstack/ovsdbserver-nb-0" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.534506 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/45f7687b-ba74-41f7-bb24-e34698bb35c4-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"45f7687b-ba74-41f7-bb24-e34698bb35c4\") " pod="openstack/ovsdbserver-nb-0" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.534678 4791 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.534710 4791 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-15d93d52-1b2f-42bb-8fea-aa1331048275\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-15d93d52-1b2f-42bb-8fea-aa1331048275\") pod \"ovsdbserver-nb-0\" (UID: \"45f7687b-ba74-41f7-bb24-e34698bb35c4\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/46f7ca44f5b97b0b9d425446d4cd6f2e62f2ebf37df91f29b17b9c3410d6cfcf/globalmount\"" pod="openstack/ovsdbserver-nb-0" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.536804 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45f7687b-ba74-41f7-bb24-e34698bb35c4-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"45f7687b-ba74-41f7-bb24-e34698bb35c4\") " pod="openstack/ovsdbserver-nb-0" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.544458 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/45f7687b-ba74-41f7-bb24-e34698bb35c4-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"45f7687b-ba74-41f7-bb24-e34698bb35c4\") " pod="openstack/ovsdbserver-nb-0" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.548142 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s4rxh\" (UniqueName: \"kubernetes.io/projected/45f7687b-ba74-41f7-bb24-e34698bb35c4-kube-api-access-s4rxh\") pod \"ovsdbserver-nb-0\" (UID: \"45f7687b-ba74-41f7-bb24-e34698bb35c4\") " pod="openstack/ovsdbserver-nb-0" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.587431 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-15d93d52-1b2f-42bb-8fea-aa1331048275\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-15d93d52-1b2f-42bb-8fea-aa1331048275\") pod \"ovsdbserver-nb-0\" (UID: \"45f7687b-ba74-41f7-bb24-e34698bb35c4\") " pod="openstack/ovsdbserver-nb-0" Feb 18 00:54:53 crc kubenswrapper[4791]: I0218 00:54:53.657566 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Feb 18 00:54:55 crc kubenswrapper[4791]: I0218 00:54:55.078735 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Feb 18 00:54:56 crc kubenswrapper[4791]: I0218 00:54:56.794826 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Feb 18 00:54:56 crc kubenswrapper[4791]: I0218 00:54:56.799893 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 00:54:56 crc kubenswrapper[4791]: I0218 00:54:56.799945 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 00:54:56 crc kubenswrapper[4791]: I0218 00:54:56.800326 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Feb 18 00:54:56 crc kubenswrapper[4791]: I0218 00:54:56.804630 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-hh4nz" Feb 18 00:54:56 crc kubenswrapper[4791]: I0218 00:54:56.804884 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Feb 18 00:54:56 crc kubenswrapper[4791]: I0218 00:54:56.805010 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Feb 18 00:54:56 crc kubenswrapper[4791]: I0218 00:54:56.805117 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Feb 18 00:54:56 crc kubenswrapper[4791]: I0218 00:54:56.809512 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Feb 18 00:54:56 crc kubenswrapper[4791]: I0218 00:54:56.901615 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ebbcd631-eef9-4249-a5a0-7aeef10d5d4e-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"ebbcd631-eef9-4249-a5a0-7aeef10d5d4e\") " pod="openstack/ovsdbserver-sb-0" Feb 18 00:54:56 crc kubenswrapper[4791]: I0218 00:54:56.901741 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ebbcd631-eef9-4249-a5a0-7aeef10d5d4e-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"ebbcd631-eef9-4249-a5a0-7aeef10d5d4e\") " pod="openstack/ovsdbserver-sb-0" Feb 18 00:54:56 crc kubenswrapper[4791]: I0218 00:54:56.901788 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebbcd631-eef9-4249-a5a0-7aeef10d5d4e-config\") pod \"ovsdbserver-sb-0\" (UID: \"ebbcd631-eef9-4249-a5a0-7aeef10d5d4e\") " pod="openstack/ovsdbserver-sb-0" Feb 18 00:54:56 crc kubenswrapper[4791]: I0218 00:54:56.901846 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ebbcd631-eef9-4249-a5a0-7aeef10d5d4e-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"ebbcd631-eef9-4249-a5a0-7aeef10d5d4e\") " pod="openstack/ovsdbserver-sb-0" Feb 18 00:54:56 crc kubenswrapper[4791]: I0218 00:54:56.901890 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fn5lq\" (UniqueName: \"kubernetes.io/projected/ebbcd631-eef9-4249-a5a0-7aeef10d5d4e-kube-api-access-fn5lq\") pod \"ovsdbserver-sb-0\" (UID: \"ebbcd631-eef9-4249-a5a0-7aeef10d5d4e\") " pod="openstack/ovsdbserver-sb-0" Feb 18 00:54:56 crc kubenswrapper[4791]: I0218 00:54:56.901960 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ebbcd631-eef9-4249-a5a0-7aeef10d5d4e-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"ebbcd631-eef9-4249-a5a0-7aeef10d5d4e\") " pod="openstack/ovsdbserver-sb-0" Feb 18 00:54:56 crc kubenswrapper[4791]: I0218 00:54:56.901988 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-de76278f-f572-4ffe-b24e-b0f3e5d89f53\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-de76278f-f572-4ffe-b24e-b0f3e5d89f53\") pod 
\"ovsdbserver-sb-0\" (UID: \"ebbcd631-eef9-4249-a5a0-7aeef10d5d4e\") " pod="openstack/ovsdbserver-sb-0" Feb 18 00:54:56 crc kubenswrapper[4791]: I0218 00:54:56.902021 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ebbcd631-eef9-4249-a5a0-7aeef10d5d4e-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"ebbcd631-eef9-4249-a5a0-7aeef10d5d4e\") " pod="openstack/ovsdbserver-sb-0" Feb 18 00:54:57 crc kubenswrapper[4791]: I0218 00:54:57.004134 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ebbcd631-eef9-4249-a5a0-7aeef10d5d4e-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"ebbcd631-eef9-4249-a5a0-7aeef10d5d4e\") " pod="openstack/ovsdbserver-sb-0" Feb 18 00:54:57 crc kubenswrapper[4791]: I0218 00:54:57.004207 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-de76278f-f572-4ffe-b24e-b0f3e5d89f53\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-de76278f-f572-4ffe-b24e-b0f3e5d89f53\") pod \"ovsdbserver-sb-0\" (UID: \"ebbcd631-eef9-4249-a5a0-7aeef10d5d4e\") " pod="openstack/ovsdbserver-sb-0" Feb 18 00:54:57 crc kubenswrapper[4791]: I0218 00:54:57.004237 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ebbcd631-eef9-4249-a5a0-7aeef10d5d4e-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"ebbcd631-eef9-4249-a5a0-7aeef10d5d4e\") " pod="openstack/ovsdbserver-sb-0" Feb 18 00:54:57 crc kubenswrapper[4791]: I0218 00:54:57.004306 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ebbcd631-eef9-4249-a5a0-7aeef10d5d4e-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"ebbcd631-eef9-4249-a5a0-7aeef10d5d4e\") " pod="openstack/ovsdbserver-sb-0" Feb 18 00:54:57 crc kubenswrapper[4791]: I0218 00:54:57.004352 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ebbcd631-eef9-4249-a5a0-7aeef10d5d4e-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"ebbcd631-eef9-4249-a5a0-7aeef10d5d4e\") " pod="openstack/ovsdbserver-sb-0" Feb 18 00:54:57 crc kubenswrapper[4791]: I0218 00:54:57.004387 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebbcd631-eef9-4249-a5a0-7aeef10d5d4e-config\") pod \"ovsdbserver-sb-0\" (UID: \"ebbcd631-eef9-4249-a5a0-7aeef10d5d4e\") " pod="openstack/ovsdbserver-sb-0" Feb 18 00:54:57 crc kubenswrapper[4791]: I0218 00:54:57.004426 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ebbcd631-eef9-4249-a5a0-7aeef10d5d4e-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"ebbcd631-eef9-4249-a5a0-7aeef10d5d4e\") " pod="openstack/ovsdbserver-sb-0" Feb 18 00:54:57 crc kubenswrapper[4791]: I0218 00:54:57.004453 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fn5lq\" (UniqueName: \"kubernetes.io/projected/ebbcd631-eef9-4249-a5a0-7aeef10d5d4e-kube-api-access-fn5lq\") pod \"ovsdbserver-sb-0\" (UID: \"ebbcd631-eef9-4249-a5a0-7aeef10d5d4e\") " pod="openstack/ovsdbserver-sb-0" Feb 18 00:54:57 crc kubenswrapper[4791]: I0218 00:54:57.005949 4791 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ebbcd631-eef9-4249-a5a0-7aeef10d5d4e-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"ebbcd631-eef9-4249-a5a0-7aeef10d5d4e\") " pod="openstack/ovsdbserver-sb-0" Feb 18 00:54:57 crc kubenswrapper[4791]: I0218 00:54:57.006138 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebbcd631-eef9-4249-a5a0-7aeef10d5d4e-config\") pod \"ovsdbserver-sb-0\" (UID: \"ebbcd631-eef9-4249-a5a0-7aeef10d5d4e\") " pod="openstack/ovsdbserver-sb-0" Feb 18 00:54:57 crc kubenswrapper[4791]: I0218 00:54:57.007001 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ebbcd631-eef9-4249-a5a0-7aeef10d5d4e-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"ebbcd631-eef9-4249-a5a0-7aeef10d5d4e\") " pod="openstack/ovsdbserver-sb-0" Feb 18 00:54:57 crc kubenswrapper[4791]: I0218 00:54:57.007463 4791 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Feb 18 00:54:57 crc kubenswrapper[4791]: I0218 00:54:57.007494 4791 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-de76278f-f572-4ffe-b24e-b0f3e5d89f53\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-de76278f-f572-4ffe-b24e-b0f3e5d89f53\") pod \"ovsdbserver-sb-0\" (UID: \"ebbcd631-eef9-4249-a5a0-7aeef10d5d4e\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/22f901de668ccddea9378c736d2da44a6b059ffa94f2f23a95046e074adf5a5c/globalmount\"" pod="openstack/ovsdbserver-sb-0" Feb 18 00:54:57 crc kubenswrapper[4791]: I0218 00:54:57.009818 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ebbcd631-eef9-4249-a5a0-7aeef10d5d4e-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"ebbcd631-eef9-4249-a5a0-7aeef10d5d4e\") " pod="openstack/ovsdbserver-sb-0" Feb 18 00:54:57 crc kubenswrapper[4791]: I0218 00:54:57.010873 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ebbcd631-eef9-4249-a5a0-7aeef10d5d4e-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"ebbcd631-eef9-4249-a5a0-7aeef10d5d4e\") " pod="openstack/ovsdbserver-sb-0" Feb 18 00:54:57 crc kubenswrapper[4791]: I0218 00:54:57.011202 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ebbcd631-eef9-4249-a5a0-7aeef10d5d4e-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"ebbcd631-eef9-4249-a5a0-7aeef10d5d4e\") " pod="openstack/ovsdbserver-sb-0" Feb 18 00:54:57 crc kubenswrapper[4791]: I0218 00:54:57.032453 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fn5lq\" (UniqueName: \"kubernetes.io/projected/ebbcd631-eef9-4249-a5a0-7aeef10d5d4e-kube-api-access-fn5lq\") pod \"ovsdbserver-sb-0\" (UID: \"ebbcd631-eef9-4249-a5a0-7aeef10d5d4e\") " pod="openstack/ovsdbserver-sb-0" Feb 18 00:54:57 crc kubenswrapper[4791]: I0218 00:54:57.054584 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-de76278f-f572-4ffe-b24e-b0f3e5d89f53\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-de76278f-f572-4ffe-b24e-b0f3e5d89f53\") pod \"ovsdbserver-sb-0\" (UID: 
\"ebbcd631-eef9-4249-a5a0-7aeef10d5d4e\") " pod="openstack/ovsdbserver-sb-0" Feb 18 00:54:57 crc kubenswrapper[4791]: I0218 00:54:57.129546 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Feb 18 00:54:59 crc kubenswrapper[4791]: W0218 00:54:59.794743 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaf0c20ed_84d0_4674_8664_1de72a190f84.slice/crio-5ed738c73b610cdea1a5f890b14fc22eae0b4d2a27bf1e01d8e4d43abfcd12f4 WatchSource:0}: Error finding container 5ed738c73b610cdea1a5f890b14fc22eae0b4d2a27bf1e01d8e4d43abfcd12f4: Status 404 returned error can't find the container with id 5ed738c73b610cdea1a5f890b14fc22eae0b4d2a27bf1e01d8e4d43abfcd12f4 Feb 18 00:54:59 crc kubenswrapper[4791]: I0218 00:54:59.867614 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"af0c20ed-84d0-4674-8664-1de72a190f84","Type":"ContainerStarted","Data":"5ed738c73b610cdea1a5f890b14fc22eae0b4d2a27bf1e01d8e4d43abfcd12f4"} Feb 18 00:55:00 crc kubenswrapper[4791]: I0218 00:55:00.180575 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-1"] Feb 18 00:55:01 crc kubenswrapper[4791]: W0218 00:55:01.003681 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod519e8bd0_f30e_4ff2_be43_b33764a95351.slice/crio-4f7b6b8544d4841de64a2043c9b8a430baa650e2e20ac3b845e6afeff20c1c22 WatchSource:0}: Error finding container 4f7b6b8544d4841de64a2043c9b8a430baa650e2e20ac3b845e6afeff20c1c22: Status 404 returned error can't find the container with id 4f7b6b8544d4841de64a2043c9b8a430baa650e2e20ac3b845e6afeff20c1c22 Feb 18 00:55:01 crc kubenswrapper[4791]: I0218 00:55:01.893101 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"519e8bd0-f30e-4ff2-be43-b33764a95351","Type":"ContainerStarted","Data":"4f7b6b8544d4841de64a2043c9b8a430baa650e2e20ac3b845e6afeff20c1c22"} Feb 18 00:55:01 crc kubenswrapper[4791]: E0218 00:55:01.914577 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Feb 18 00:55:01 crc kubenswrapper[4791]: E0218 00:55:01.914780 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rcgrq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-p9kdj_openstack(4d48bffb-aea0-4fb7-85ee-239425c1627a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 18 00:55:01 crc kubenswrapper[4791]: E0218 00:55:01.916214 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-p9kdj" podUID="4d48bffb-aea0-4fb7-85ee-239425c1627a" Feb 18 00:55:02 crc kubenswrapper[4791]: E0218 00:55:02.010311 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Feb 18 00:55:02 crc kubenswrapper[4791]: E0218 00:55:02.010709 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-drp5n,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-v46bk_openstack(c07c6f72-51a8-45c7-a103-d24460d4f15a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 18 00:55:02 crc kubenswrapper[4791]: E0218 00:55:02.014220 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-v46bk" podUID="c07c6f72-51a8-45c7-a103-d24460d4f15a" Feb 18 00:55:02 crc kubenswrapper[4791]: E0218 00:55:02.086322 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Feb 18 00:55:02 crc kubenswrapper[4791]: E0218 00:55:02.086470 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-clsqr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-666b6646f7-grhf9_openstack(71baa5bb-d1af-486b-a025-ca9e786d2382): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 18 00:55:02 crc kubenswrapper[4791]: E0218 00:55:02.089626 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-666b6646f7-grhf9" podUID="71baa5bb-d1af-486b-a025-ca9e786d2382" Feb 18 00:55:02 crc kubenswrapper[4791]: I0218 00:55:02.876965 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Feb 18 00:55:02 crc kubenswrapper[4791]: I0218 00:55:02.894436 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Feb 18 00:55:02 crc kubenswrapper[4791]: I0218 00:55:02.908503 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Feb 18 00:55:02 crc kubenswrapper[4791]: W0218 00:55:02.913575 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf9e1b835_c4bf_4722_b4f5_512b3439fdfb.slice/crio-83c8eac893aa098b8b05d84e411173437a8ffcebf5677537182556a028c88cf2 WatchSource:0}: Error finding container 83c8eac893aa098b8b05d84e411173437a8ffcebf5677537182556a028c88cf2: Status 404 returned error can't find the container with id 83c8eac893aa098b8b05d84e411173437a8ffcebf5677537182556a028c88cf2 Feb 18 00:55:02 crc kubenswrapper[4791]: W0218 00:55:02.914148 4791 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4bb5fcf5_6cd5_4569_b788_5740edee3793.slice/crio-e6b0d7e7a41b55e4e7d45a2cbb67fda4251545703d01d1523c8bfcf2efa50be6 WatchSource:0}: Error finding container e6b0d7e7a41b55e4e7d45a2cbb67fda4251545703d01d1523c8bfcf2efa50be6: Status 404 returned error can't find the container with id e6b0d7e7a41b55e4e7d45a2cbb67fda4251545703d01d1523c8bfcf2efa50be6 Feb 18 00:55:02 crc kubenswrapper[4791]: I0218 00:55:02.914294 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"e3e07d33-a963-4a49-b9f4-eb2b867eae6a","Type":"ContainerStarted","Data":"9eabe1d3e6554bb18aca1a9b818b0e5db77ef41ec149a9f38bde544797578f22"} Feb 18 00:55:02 crc kubenswrapper[4791]: I0218 00:55:02.915980 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Feb 18 00:55:02 crc kubenswrapper[4791]: I0218 00:55:02.928086 4791 generic.go:334] "Generic (PLEG): container finished" podID="864417f3-df49-4f21-b1a7-8895951b7914" containerID="0c6df0e82cab763400d75f36a537b7cf35cb345e0c08a19ed5352226b7fb37fb" exitCode=0 Feb 18 00:55:02 crc kubenswrapper[4791]: I0218 00:55:02.929826 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-2nbhb" event={"ID":"864417f3-df49-4f21-b1a7-8895951b7914","Type":"ContainerDied","Data":"0c6df0e82cab763400d75f36a537b7cf35cb345e0c08a19ed5352226b7fb37fb"} Feb 18 00:55:03 crc kubenswrapper[4791]: I0218 00:55:03.751239 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-l4g2x"] Feb 18 00:55:03 crc kubenswrapper[4791]: I0218 00:55:03.768279 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-76c748cbff-8c4wc"] Feb 18 00:55:03 crc kubenswrapper[4791]: I0218 00:55:03.782971 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-v46bk" Feb 18 00:55:03 crc kubenswrapper[4791]: I0218 00:55:03.811520 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-p9kdj" Feb 18 00:55:03 crc kubenswrapper[4791]: I0218 00:55:03.848798 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-2"] Feb 18 00:55:03 crc kubenswrapper[4791]: I0218 00:55:03.881743 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-5shlq"] Feb 18 00:55:03 crc kubenswrapper[4791]: I0218 00:55:03.900800 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Feb 18 00:55:03 crc kubenswrapper[4791]: I0218 00:55:03.918312 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-ui-dashboards-66cbf594b5-z78rw"] Feb 18 00:55:03 crc kubenswrapper[4791]: I0218 00:55:03.927055 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Feb 18 00:55:03 crc kubenswrapper[4791]: I0218 00:55:03.933701 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d48bffb-aea0-4fb7-85ee-239425c1627a-config\") pod \"4d48bffb-aea0-4fb7-85ee-239425c1627a\" (UID: \"4d48bffb-aea0-4fb7-85ee-239425c1627a\") " Feb 18 00:55:03 crc kubenswrapper[4791]: I0218 00:55:03.933934 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rcgrq\" (UniqueName: \"kubernetes.io/projected/4d48bffb-aea0-4fb7-85ee-239425c1627a-kube-api-access-rcgrq\") pod \"4d48bffb-aea0-4fb7-85ee-239425c1627a\" (UID: \"4d48bffb-aea0-4fb7-85ee-239425c1627a\") " Feb 18 00:55:03 crc kubenswrapper[4791]: I0218 00:55:03.934026 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c07c6f72-51a8-45c7-a103-d24460d4f15a-config\") pod \"c07c6f72-51a8-45c7-a103-d24460d4f15a\" (UID: \"c07c6f72-51a8-45c7-a103-d24460d4f15a\") " Feb 18 00:55:03 crc kubenswrapper[4791]: I0218 00:55:03.934147 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-drp5n\" (UniqueName: \"kubernetes.io/projected/c07c6f72-51a8-45c7-a103-d24460d4f15a-kube-api-access-drp5n\") pod \"c07c6f72-51a8-45c7-a103-d24460d4f15a\" (UID: \"c07c6f72-51a8-45c7-a103-d24460d4f15a\") " Feb 18 00:55:03 crc kubenswrapper[4791]: I0218 00:55:03.934297 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c07c6f72-51a8-45c7-a103-d24460d4f15a-dns-svc\") pod \"c07c6f72-51a8-45c7-a103-d24460d4f15a\" (UID: \"c07c6f72-51a8-45c7-a103-d24460d4f15a\") " Feb 18 00:55:03 crc kubenswrapper[4791]: I0218 00:55:03.935854 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c07c6f72-51a8-45c7-a103-d24460d4f15a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c07c6f72-51a8-45c7-a103-d24460d4f15a" (UID: "c07c6f72-51a8-45c7-a103-d24460d4f15a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:55:03 crc kubenswrapper[4791]: I0218 00:55:03.936444 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d48bffb-aea0-4fb7-85ee-239425c1627a-config" (OuterVolumeSpecName: "config") pod "4d48bffb-aea0-4fb7-85ee-239425c1627a" (UID: "4d48bffb-aea0-4fb7-85ee-239425c1627a"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:55:03 crc kubenswrapper[4791]: I0218 00:55:03.937870 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c07c6f72-51a8-45c7-a103-d24460d4f15a-config" (OuterVolumeSpecName: "config") pod "c07c6f72-51a8-45c7-a103-d24460d4f15a" (UID: "c07c6f72-51a8-45c7-a103-d24460d4f15a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:55:03 crc kubenswrapper[4791]: I0218 00:55:03.942146 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d48bffb-aea0-4fb7-85ee-239425c1627a-kube-api-access-rcgrq" (OuterVolumeSpecName: "kube-api-access-rcgrq") pod "4d48bffb-aea0-4fb7-85ee-239425c1627a" (UID: "4d48bffb-aea0-4fb7-85ee-239425c1627a"). InnerVolumeSpecName "kube-api-access-rcgrq". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:55:03 crc kubenswrapper[4791]: I0218 00:55:03.945031 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-l4g2x" event={"ID":"8ac723a9-6515-4a69-aca1-95e459bf2047","Type":"ContainerStarted","Data":"c489a1adefd3e8ef64168c0143e223eb6c750fff42a10fc026b56aef43fbd5fd"} Feb 18 00:55:03 crc kubenswrapper[4791]: I0218 00:55:03.949049 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c07c6f72-51a8-45c7-a103-d24460d4f15a-kube-api-access-drp5n" (OuterVolumeSpecName: "kube-api-access-drp5n") pod "c07c6f72-51a8-45c7-a103-d24460d4f15a" (UID: "c07c6f72-51a8-45c7-a103-d24460d4f15a"). InnerVolumeSpecName "kube-api-access-drp5n". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:55:03 crc kubenswrapper[4791]: I0218 00:55:03.949786 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"519e8bd0-f30e-4ff2-be43-b33764a95351","Type":"ContainerStarted","Data":"ba43b1dd0a2b84c56947bbfb98f6d09a648684bfbeacf33f245015081ac2734e"} Feb 18 00:55:03 crc kubenswrapper[4791]: I0218 00:55:03.954913 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"4bb5fcf5-6cd5-4569-b788-5740edee3793","Type":"ContainerStarted","Data":"e6b0d7e7a41b55e4e7d45a2cbb67fda4251545703d01d1523c8bfcf2efa50be6"} Feb 18 00:55:03 crc kubenswrapper[4791]: I0218 00:55:03.957292 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-p9kdj" event={"ID":"4d48bffb-aea0-4fb7-85ee-239425c1627a","Type":"ContainerDied","Data":"44d842233afd1bdb4ff83fa5f8ad0996c5389b5bda38391778339756f7cfd040"} Feb 18 00:55:03 crc kubenswrapper[4791]: I0218 00:55:03.957329 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-p9kdj" Feb 18 00:55:03 crc kubenswrapper[4791]: I0218 00:55:03.963223 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-2nbhb" event={"ID":"864417f3-df49-4f21-b1a7-8895951b7914","Type":"ContainerStarted","Data":"722be72ca704049c042c7541c41c34b4d5e9ab28c0219f63370e376e97c80e55"} Feb 18 00:55:03 crc kubenswrapper[4791]: I0218 00:55:03.963297 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57d769cc4f-2nbhb" Feb 18 00:55:03 crc kubenswrapper[4791]: I0218 00:55:03.966622 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"10fd526e-f41c-4c4a-8e15-239cd3ac37da","Type":"ContainerStarted","Data":"583912c29332d6d7363a4c367b463077382969b5fb9887301f48fb4d2083c4d4"} Feb 18 00:55:03 crc kubenswrapper[4791]: I0218 00:55:03.969236 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"f9e1b835-c4bf-4722-b4f5-512b3439fdfb","Type":"ContainerStarted","Data":"83c8eac893aa098b8b05d84e411173437a8ffcebf5677537182556a028c88cf2"} Feb 18 00:55:03 crc kubenswrapper[4791]: I0218 00:55:03.977049 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-v46bk" event={"ID":"c07c6f72-51a8-45c7-a103-d24460d4f15a","Type":"ContainerDied","Data":"acc396869454858aab9fe727c034f3ca1ffb8f9efa7659f9cbe240d923f97166"} Feb 18 00:55:03 crc kubenswrapper[4791]: I0218 00:55:03.977299 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-v46bk" Feb 18 00:55:04 crc kubenswrapper[4791]: I0218 00:55:04.001727 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-76c748cbff-8c4wc" event={"ID":"03480f27-1247-4293-9725-bd33cb3f7841","Type":"ContainerStarted","Data":"3f1497b74d83ec81dcd7e055091387bed335ad534ab77d71d65dd6999afc173b"} Feb 18 00:55:04 crc kubenswrapper[4791]: I0218 00:55:04.004956 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"ccdd7e1f-2546-4f46-93cd-7e07d3db2182","Type":"ContainerStarted","Data":"b27828fe159a614bd83755a6997f3a5c339d879bb29f9784ec44c8423ded16eb"} Feb 18 00:55:04 crc kubenswrapper[4791]: I0218 00:55:04.013913 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57d769cc4f-2nbhb" podStartSLOduration=3.479898724 podStartE2EDuration="21.013895314s" podCreationTimestamp="2026-02-18 00:54:43 +0000 UTC" firstStartedPulling="2026-02-18 00:54:44.673138856 +0000 UTC m=+1226.241152026" lastFinishedPulling="2026-02-18 00:55:02.207135456 +0000 UTC m=+1243.775148616" observedRunningTime="2026-02-18 00:55:04.012028006 +0000 UTC m=+1245.580041186" watchObservedRunningTime="2026-02-18 00:55:04.013895314 +0000 UTC m=+1245.581908484" Feb 18 00:55:04 crc kubenswrapper[4791]: I0218 00:55:04.043305 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c07c6f72-51a8-45c7-a103-d24460d4f15a-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:04 crc kubenswrapper[4791]: I0218 00:55:04.043353 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-drp5n\" (UniqueName: \"kubernetes.io/projected/c07c6f72-51a8-45c7-a103-d24460d4f15a-kube-api-access-drp5n\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:04 crc kubenswrapper[4791]: I0218 00:55:04.043364 4791 reconciler_common.go:293] 
"Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c07c6f72-51a8-45c7-a103-d24460d4f15a-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:04 crc kubenswrapper[4791]: I0218 00:55:04.043374 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d48bffb-aea0-4fb7-85ee-239425c1627a-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:04 crc kubenswrapper[4791]: I0218 00:55:04.043384 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rcgrq\" (UniqueName: \"kubernetes.io/projected/4d48bffb-aea0-4fb7-85ee-239425c1627a-kube-api-access-rcgrq\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:04 crc kubenswrapper[4791]: E0218 00:55:04.095866 4791 log.go:32] "CreateContainer in sandbox from runtime service failed" err=< Feb 18 00:55:04 crc kubenswrapper[4791]: rpc error: code = Unknown desc = container create failed: mount `/var/lib/kubelet/pods/71baa5bb-d1af-486b-a025-ca9e786d2382/volume-subpaths/dns-svc/init/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Feb 18 00:55:04 crc kubenswrapper[4791]: > podSandboxID="8453721944eff427de9f240b813e8570c57c7280c83fce04b6c3bc2a1be2ac32" Feb 18 00:55:04 crc kubenswrapper[4791]: E0218 00:55:04.096003 4791 kuberuntime_manager.go:1274] "Unhandled Error" err=< Feb 18 00:55:04 crc kubenswrapper[4791]: init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-clsqr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
dnsmasq-dns-666b6646f7-grhf9_openstack(71baa5bb-d1af-486b-a025-ca9e786d2382): CreateContainerError: container create failed: mount `/var/lib/kubelet/pods/71baa5bb-d1af-486b-a025-ca9e786d2382/volume-subpaths/dns-svc/init/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Feb 18 00:55:04 crc kubenswrapper[4791]: > logger="UnhandledError" Feb 18 00:55:04 crc kubenswrapper[4791]: E0218 00:55:04.097191 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with CreateContainerError: \"container create failed: mount `/var/lib/kubelet/pods/71baa5bb-d1af-486b-a025-ca9e786d2382/volume-subpaths/dns-svc/init/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory\\n\"" pod="openstack/dnsmasq-dns-666b6646f7-grhf9" podUID="71baa5bb-d1af-486b-a025-ca9e786d2382" Feb 18 00:55:04 crc kubenswrapper[4791]: W0218 00:55:04.138483 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod54deb01e_caa1_4fe0_8bd0_d412c4d73210.slice/crio-e8416f481fb9d2b7f0da8f6b7f78508363cee9db0fb97b4b1dee0767ff6930b4 WatchSource:0}: Error finding container e8416f481fb9d2b7f0da8f6b7f78508363cee9db0fb97b4b1dee0767ff6930b4: Status 404 returned error can't find the container with id e8416f481fb9d2b7f0da8f6b7f78508363cee9db0fb97b4b1dee0767ff6930b4 Feb 18 00:55:04 crc kubenswrapper[4791]: I0218 00:55:04.139503 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-v46bk"] Feb 18 00:55:04 crc kubenswrapper[4791]: I0218 00:55:04.150460 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-v46bk"] Feb 18 00:55:04 crc kubenswrapper[4791]: W0218 00:55:04.160775 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod617ebaca_caf6_45cf_92b1_f1bb067bf2f1.slice/crio-e125db5116cbc5edb0b75c056abb5cca18958e984699ab0abffdeb1c9dbccfed WatchSource:0}: Error finding container e125db5116cbc5edb0b75c056abb5cca18958e984699ab0abffdeb1c9dbccfed: Status 404 returned error can't find the container with id e125db5116cbc5edb0b75c056abb5cca18958e984699ab0abffdeb1c9dbccfed Feb 18 00:55:04 crc kubenswrapper[4791]: I0218 00:55:04.181185 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-p9kdj"] Feb 18 00:55:04 crc kubenswrapper[4791]: I0218 00:55:04.188483 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-p9kdj"] Feb 18 00:55:04 crc kubenswrapper[4791]: I0218 00:55:04.942882 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Feb 18 00:55:05 crc kubenswrapper[4791]: I0218 00:55:05.018372 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"5b450ecd-05e1-453a-a4d5-953802f0a1cf","Type":"ContainerStarted","Data":"ca6fba5979c814221553760960d5f3fcca7896906cfcac59791e4f82309fdf0c"} Feb 18 00:55:05 crc kubenswrapper[4791]: I0218 00:55:05.030012 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"4bb5fcf5-6cd5-4569-b788-5740edee3793","Type":"ContainerStarted","Data":"8fc686486368da1e0d773d98fc965789987967d981714709a5bcb76c4a7714ad"} Feb 18 00:55:05 crc kubenswrapper[4791]: I0218 00:55:05.034791 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-ui-dashboards-66cbf594b5-z78rw" 
event={"ID":"617ebaca-caf6-45cf-92b1-f1bb067bf2f1","Type":"ContainerStarted","Data":"e125db5116cbc5edb0b75c056abb5cca18958e984699ab0abffdeb1c9dbccfed"} Feb 18 00:55:05 crc kubenswrapper[4791]: I0218 00:55:05.036629 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"1c3406dc-1b5d-4376-8b80-b55720c15091","Type":"ContainerStarted","Data":"46e14f14047bfb29055b5f1098d6e1e2aa7eceeefdacd4817e070435f78cd029"} Feb 18 00:55:05 crc kubenswrapper[4791]: I0218 00:55:05.038371 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-5shlq" event={"ID":"54deb01e-caa1-4fe0-8bd0-d412c4d73210","Type":"ContainerStarted","Data":"e8416f481fb9d2b7f0da8f6b7f78508363cee9db0fb97b4b1dee0767ff6930b4"} Feb 18 00:55:05 crc kubenswrapper[4791]: I0218 00:55:05.040041 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-76c748cbff-8c4wc" event={"ID":"03480f27-1247-4293-9725-bd33cb3f7841","Type":"ContainerStarted","Data":"41e91e98316c47bb06e5469311407be1f776885f3badb96b7af4587b8977ab69"} Feb 18 00:55:05 crc kubenswrapper[4791]: I0218 00:55:05.041472 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"45f7687b-ba74-41f7-bb24-e34698bb35c4","Type":"ContainerStarted","Data":"b81a97c1c7a51ed1de1170febc8545ef1efb4a1df1649be2733ce41cefd0cc51"} Feb 18 00:55:05 crc kubenswrapper[4791]: I0218 00:55:05.079810 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d48bffb-aea0-4fb7-85ee-239425c1627a" path="/var/lib/kubelet/pods/4d48bffb-aea0-4fb7-85ee-239425c1627a/volumes" Feb 18 00:55:05 crc kubenswrapper[4791]: I0218 00:55:05.080453 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c07c6f72-51a8-45c7-a103-d24460d4f15a" path="/var/lib/kubelet/pods/c07c6f72-51a8-45c7-a103-d24460d4f15a/volumes" Feb 18 00:55:05 crc kubenswrapper[4791]: I0218 00:55:05.083321 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-76c748cbff-8c4wc" podStartSLOduration=14.083302925 podStartE2EDuration="14.083302925s" podCreationTimestamp="2026-02-18 00:54:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:55:05.081744367 +0000 UTC m=+1246.649757557" watchObservedRunningTime="2026-02-18 00:55:05.083302925 +0000 UTC m=+1246.651316085" Feb 18 00:55:07 crc kubenswrapper[4791]: I0218 00:55:07.073766 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"ebbcd631-eef9-4249-a5a0-7aeef10d5d4e","Type":"ContainerStarted","Data":"fe0cf96abaa604bcd483c9952d11d4a10f73b8b6c98225aa1aa0f7c5f2691b04"} Feb 18 00:55:09 crc kubenswrapper[4791]: I0218 00:55:09.098879 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-57d769cc4f-2nbhb" Feb 18 00:55:09 crc kubenswrapper[4791]: I0218 00:55:09.336868 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-grhf9"] Feb 18 00:55:10 crc kubenswrapper[4791]: I0218 00:55:10.835891 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-grhf9" Feb 18 00:55:10 crc kubenswrapper[4791]: I0218 00:55:10.923532 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/71baa5bb-d1af-486b-a025-ca9e786d2382-config\") pod \"71baa5bb-d1af-486b-a025-ca9e786d2382\" (UID: \"71baa5bb-d1af-486b-a025-ca9e786d2382\") " Feb 18 00:55:10 crc kubenswrapper[4791]: I0218 00:55:10.923660 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-clsqr\" (UniqueName: \"kubernetes.io/projected/71baa5bb-d1af-486b-a025-ca9e786d2382-kube-api-access-clsqr\") pod \"71baa5bb-d1af-486b-a025-ca9e786d2382\" (UID: \"71baa5bb-d1af-486b-a025-ca9e786d2382\") " Feb 18 00:55:10 crc kubenswrapper[4791]: I0218 00:55:10.923802 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/71baa5bb-d1af-486b-a025-ca9e786d2382-dns-svc\") pod \"71baa5bb-d1af-486b-a025-ca9e786d2382\" (UID: \"71baa5bb-d1af-486b-a025-ca9e786d2382\") " Feb 18 00:55:10 crc kubenswrapper[4791]: I0218 00:55:10.928787 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/71baa5bb-d1af-486b-a025-ca9e786d2382-kube-api-access-clsqr" (OuterVolumeSpecName: "kube-api-access-clsqr") pod "71baa5bb-d1af-486b-a025-ca9e786d2382" (UID: "71baa5bb-d1af-486b-a025-ca9e786d2382"). InnerVolumeSpecName "kube-api-access-clsqr". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:55:10 crc kubenswrapper[4791]: E0218 00:55:10.945840 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/71baa5bb-d1af-486b-a025-ca9e786d2382-dns-svc podName:71baa5bb-d1af-486b-a025-ca9e786d2382 nodeName:}" failed. No retries permitted until 2026-02-18 00:55:11.44581443 +0000 UTC m=+1253.013827600 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "dns-svc" (UniqueName: "kubernetes.io/configmap/71baa5bb-d1af-486b-a025-ca9e786d2382-dns-svc") pod "71baa5bb-d1af-486b-a025-ca9e786d2382" (UID: "71baa5bb-d1af-486b-a025-ca9e786d2382") : error deleting /var/lib/kubelet/pods/71baa5bb-d1af-486b-a025-ca9e786d2382/volume-subpaths: remove /var/lib/kubelet/pods/71baa5bb-d1af-486b-a025-ca9e786d2382/volume-subpaths: no such file or directory Feb 18 00:55:10 crc kubenswrapper[4791]: I0218 00:55:10.946286 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/71baa5bb-d1af-486b-a025-ca9e786d2382-config" (OuterVolumeSpecName: "config") pod "71baa5bb-d1af-486b-a025-ca9e786d2382" (UID: "71baa5bb-d1af-486b-a025-ca9e786d2382"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:55:11 crc kubenswrapper[4791]: I0218 00:55:11.026250 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/71baa5bb-d1af-486b-a025-ca9e786d2382-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:11 crc kubenswrapper[4791]: I0218 00:55:11.026280 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-clsqr\" (UniqueName: \"kubernetes.io/projected/71baa5bb-d1af-486b-a025-ca9e786d2382-kube-api-access-clsqr\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:11 crc kubenswrapper[4791]: I0218 00:55:11.098830 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-grhf9" event={"ID":"71baa5bb-d1af-486b-a025-ca9e786d2382","Type":"ContainerDied","Data":"8453721944eff427de9f240b813e8570c57c7280c83fce04b6c3bc2a1be2ac32"} Feb 18 00:55:11 crc kubenswrapper[4791]: I0218 00:55:11.098876 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-grhf9" Feb 18 00:55:11 crc kubenswrapper[4791]: I0218 00:55:11.531535 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-76c748cbff-8c4wc" Feb 18 00:55:11 crc kubenswrapper[4791]: I0218 00:55:11.531578 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-76c748cbff-8c4wc" Feb 18 00:55:11 crc kubenswrapper[4791]: I0218 00:55:11.534880 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/71baa5bb-d1af-486b-a025-ca9e786d2382-dns-svc\") pod \"71baa5bb-d1af-486b-a025-ca9e786d2382\" (UID: \"71baa5bb-d1af-486b-a025-ca9e786d2382\") " Feb 18 00:55:11 crc kubenswrapper[4791]: I0218 00:55:11.535666 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/71baa5bb-d1af-486b-a025-ca9e786d2382-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "71baa5bb-d1af-486b-a025-ca9e786d2382" (UID: "71baa5bb-d1af-486b-a025-ca9e786d2382"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:55:11 crc kubenswrapper[4791]: I0218 00:55:11.537393 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-76c748cbff-8c4wc" Feb 18 00:55:11 crc kubenswrapper[4791]: I0218 00:55:11.638024 4791 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/71baa5bb-d1af-486b-a025-ca9e786d2382-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:11 crc kubenswrapper[4791]: I0218 00:55:11.765481 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-grhf9"] Feb 18 00:55:11 crc kubenswrapper[4791]: I0218 00:55:11.773646 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-grhf9"] Feb 18 00:55:12 crc kubenswrapper[4791]: I0218 00:55:12.112721 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-76c748cbff-8c4wc" Feb 18 00:55:12 crc kubenswrapper[4791]: I0218 00:55:12.179837 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-776c7c9864-7v9mt"] Feb 18 00:55:13 crc kubenswrapper[4791]: I0218 00:55:13.074687 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="71baa5bb-d1af-486b-a025-ca9e786d2382" path="/var/lib/kubelet/pods/71baa5bb-d1af-486b-a025-ca9e786d2382/volumes" Feb 18 00:55:13 crc kubenswrapper[4791]: I0218 00:55:13.119165 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"e3e07d33-a963-4a49-b9f4-eb2b867eae6a","Type":"ContainerStarted","Data":"d0f4e344d76f406745172ae6eacae25f1126f061d5f5bbc1faa4da26edc99590"} Feb 18 00:55:13 crc kubenswrapper[4791]: I0218 00:55:13.136131 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=17.716336904 podStartE2EDuration="26.136112174s" podCreationTimestamp="2026-02-18 00:54:47 +0000 UTC" firstStartedPulling="2026-02-18 00:55:02.879980586 +0000 UTC m=+1244.447993756" lastFinishedPulling="2026-02-18 00:55:11.299755856 +0000 UTC m=+1252.867769026" observedRunningTime="2026-02-18 00:55:13.136064583 +0000 UTC m=+1254.704077763" watchObservedRunningTime="2026-02-18 00:55:13.136112174 +0000 UTC m=+1254.704125344" Feb 18 00:55:14 crc kubenswrapper[4791]: I0218 00:55:14.133715 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-ui-dashboards-66cbf594b5-z78rw" event={"ID":"617ebaca-caf6-45cf-92b1-f1bb067bf2f1","Type":"ContainerStarted","Data":"b8c86f8e6c2150a2e1f251a42fb4fe6e2f76b48c02b43fa60938e7bc80174b8c"} Feb 18 00:55:14 crc kubenswrapper[4791]: I0218 00:55:14.136674 4791 generic.go:334] "Generic (PLEG): container finished" podID="8ac723a9-6515-4a69-aca1-95e459bf2047" containerID="f776b331c912b9e2d7f8553b27d8043ab3255f8db459dc47475e20bd1afa2a42" exitCode=0 Feb 18 00:55:14 crc kubenswrapper[4791]: I0218 00:55:14.136734 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-l4g2x" event={"ID":"8ac723a9-6515-4a69-aca1-95e459bf2047","Type":"ContainerDied","Data":"f776b331c912b9e2d7f8553b27d8043ab3255f8db459dc47475e20bd1afa2a42"} Feb 18 00:55:14 crc kubenswrapper[4791]: I0218 00:55:14.139587 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"af0c20ed-84d0-4674-8664-1de72a190f84","Type":"ContainerStarted","Data":"df1c5ecdf365e8950237384b18df878b14e5dd2d301e493f4f4c60bdda525377"} 
Feb 18 00:55:14 crc kubenswrapper[4791]: I0218 00:55:14.141455 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"f9e1b835-c4bf-4722-b4f5-512b3439fdfb","Type":"ContainerStarted","Data":"f8aec21e7584a123640e22fd5d8c5d22a49f4223c22b816e731783f22c4317d6"} Feb 18 00:55:14 crc kubenswrapper[4791]: I0218 00:55:14.144587 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"ccdd7e1f-2546-4f46-93cd-7e07d3db2182","Type":"ContainerStarted","Data":"8e630aef482dec524705a367b2c2c8fe281886e8f8a67d1cec35a97cced6e80a"} Feb 18 00:55:14 crc kubenswrapper[4791]: I0218 00:55:14.144906 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Feb 18 00:55:14 crc kubenswrapper[4791]: I0218 00:55:14.148265 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"5b450ecd-05e1-453a-a4d5-953802f0a1cf","Type":"ContainerStarted","Data":"c467e7539b13f4791359d094ec74565a21bb791891381b702594f37899c6d3d5"} Feb 18 00:55:14 crc kubenswrapper[4791]: I0218 00:55:14.156302 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-ui-dashboards-66cbf594b5-z78rw" podStartSLOduration=17.036477646 podStartE2EDuration="24.156268554s" podCreationTimestamp="2026-02-18 00:54:50 +0000 UTC" firstStartedPulling="2026-02-18 00:55:04.179650708 +0000 UTC m=+1245.747663878" lastFinishedPulling="2026-02-18 00:55:11.299441616 +0000 UTC m=+1252.867454786" observedRunningTime="2026-02-18 00:55:14.145055085 +0000 UTC m=+1255.713068255" watchObservedRunningTime="2026-02-18 00:55:14.156268554 +0000 UTC m=+1255.724281754" Feb 18 00:55:14 crc kubenswrapper[4791]: I0218 00:55:14.157213 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"ebbcd631-eef9-4249-a5a0-7aeef10d5d4e","Type":"ContainerStarted","Data":"af8a11cf4e1a01165787d2b2e4db659679a515a78264e9d22a532c7970bd0e7e"} Feb 18 00:55:14 crc kubenswrapper[4791]: I0218 00:55:14.162227 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-5shlq" event={"ID":"54deb01e-caa1-4fe0-8bd0-d412c4d73210","Type":"ContainerStarted","Data":"67295da71040dd9dda3dfbb5a4b196b363fa06336b29b707aca5ac3202840f53"} Feb 18 00:55:14 crc kubenswrapper[4791]: I0218 00:55:14.163278 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-5shlq" Feb 18 00:55:14 crc kubenswrapper[4791]: I0218 00:55:14.169289 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"45f7687b-ba74-41f7-bb24-e34698bb35c4","Type":"ContainerStarted","Data":"a86558de6c49b300e7015e14f7069901212e5c20a03ac2e1d3fbaf9eb33c6da6"} Feb 18 00:55:14 crc kubenswrapper[4791]: I0218 00:55:14.169528 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Feb 18 00:55:14 crc kubenswrapper[4791]: I0218 00:55:14.176070 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=15.282498735 podStartE2EDuration="25.176050129s" podCreationTimestamp="2026-02-18 00:54:49 +0000 UTC" firstStartedPulling="2026-02-18 00:55:02.913941522 +0000 UTC m=+1244.481954692" lastFinishedPulling="2026-02-18 00:55:12.807492916 +0000 UTC m=+1254.375506086" observedRunningTime="2026-02-18 00:55:14.163624173 +0000 UTC m=+1255.731637343" watchObservedRunningTime="2026-02-18 
00:55:14.176050129 +0000 UTC m=+1255.744063309" Feb 18 00:55:14 crc kubenswrapper[4791]: I0218 00:55:14.285846 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-5shlq" podStartSLOduration=14.515490135 podStartE2EDuration="22.285831192s" podCreationTimestamp="2026-02-18 00:54:52 +0000 UTC" firstStartedPulling="2026-02-18 00:55:04.145241717 +0000 UTC m=+1245.713254887" lastFinishedPulling="2026-02-18 00:55:11.915582774 +0000 UTC m=+1253.483595944" observedRunningTime="2026-02-18 00:55:14.281813038 +0000 UTC m=+1255.849826228" watchObservedRunningTime="2026-02-18 00:55:14.285831192 +0000 UTC m=+1255.853844362" Feb 18 00:55:15 crc kubenswrapper[4791]: I0218 00:55:15.178997 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-l4g2x" event={"ID":"8ac723a9-6515-4a69-aca1-95e459bf2047","Type":"ContainerStarted","Data":"998bd1e994739da5f465b12df81f0ad677e25ffcec7f3542a40641c35ded8cc5"} Feb 18 00:55:17 crc kubenswrapper[4791]: I0218 00:55:17.838400 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Feb 18 00:55:20 crc kubenswrapper[4791]: I0218 00:55:20.269094 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"1c3406dc-1b5d-4376-8b80-b55720c15091","Type":"ContainerStarted","Data":"765528daa15c40187620b57eeeec366bd55c2183900753d58ea1126db14d6915"} Feb 18 00:55:20 crc kubenswrapper[4791]: I0218 00:55:20.287270 4791 generic.go:334] "Generic (PLEG): container finished" podID="f9e1b835-c4bf-4722-b4f5-512b3439fdfb" containerID="f8aec21e7584a123640e22fd5d8c5d22a49f4223c22b816e731783f22c4317d6" exitCode=0 Feb 18 00:55:20 crc kubenswrapper[4791]: I0218 00:55:20.287317 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"f9e1b835-c4bf-4722-b4f5-512b3439fdfb","Type":"ContainerDied","Data":"f8aec21e7584a123640e22fd5d8c5d22a49f4223c22b816e731783f22c4317d6"} Feb 18 00:55:20 crc kubenswrapper[4791]: I0218 00:55:20.309314 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Feb 18 00:55:20 crc kubenswrapper[4791]: I0218 00:55:20.363267 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-v6hzs"] Feb 18 00:55:20 crc kubenswrapper[4791]: I0218 00:55:20.373925 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7cb5889db5-v6hzs" Feb 18 00:55:20 crc kubenswrapper[4791]: I0218 00:55:20.397359 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-v6hzs"] Feb 18 00:55:20 crc kubenswrapper[4791]: I0218 00:55:20.534448 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p4mzv\" (UniqueName: \"kubernetes.io/projected/8fd1aa64-b534-4775-8271-e15c6d10dc5d-kube-api-access-p4mzv\") pod \"dnsmasq-dns-7cb5889db5-v6hzs\" (UID: \"8fd1aa64-b534-4775-8271-e15c6d10dc5d\") " pod="openstack/dnsmasq-dns-7cb5889db5-v6hzs" Feb 18 00:55:20 crc kubenswrapper[4791]: I0218 00:55:20.534881 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8fd1aa64-b534-4775-8271-e15c6d10dc5d-dns-svc\") pod \"dnsmasq-dns-7cb5889db5-v6hzs\" (UID: \"8fd1aa64-b534-4775-8271-e15c6d10dc5d\") " pod="openstack/dnsmasq-dns-7cb5889db5-v6hzs" Feb 18 00:55:20 crc kubenswrapper[4791]: I0218 00:55:20.534935 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8fd1aa64-b534-4775-8271-e15c6d10dc5d-config\") pod \"dnsmasq-dns-7cb5889db5-v6hzs\" (UID: \"8fd1aa64-b534-4775-8271-e15c6d10dc5d\") " pod="openstack/dnsmasq-dns-7cb5889db5-v6hzs" Feb 18 00:55:20 crc kubenswrapper[4791]: I0218 00:55:20.636638 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8fd1aa64-b534-4775-8271-e15c6d10dc5d-config\") pod \"dnsmasq-dns-7cb5889db5-v6hzs\" (UID: \"8fd1aa64-b534-4775-8271-e15c6d10dc5d\") " pod="openstack/dnsmasq-dns-7cb5889db5-v6hzs" Feb 18 00:55:20 crc kubenswrapper[4791]: I0218 00:55:20.636756 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p4mzv\" (UniqueName: \"kubernetes.io/projected/8fd1aa64-b534-4775-8271-e15c6d10dc5d-kube-api-access-p4mzv\") pod \"dnsmasq-dns-7cb5889db5-v6hzs\" (UID: \"8fd1aa64-b534-4775-8271-e15c6d10dc5d\") " pod="openstack/dnsmasq-dns-7cb5889db5-v6hzs" Feb 18 00:55:20 crc kubenswrapper[4791]: I0218 00:55:20.636843 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8fd1aa64-b534-4775-8271-e15c6d10dc5d-dns-svc\") pod \"dnsmasq-dns-7cb5889db5-v6hzs\" (UID: \"8fd1aa64-b534-4775-8271-e15c6d10dc5d\") " pod="openstack/dnsmasq-dns-7cb5889db5-v6hzs" Feb 18 00:55:20 crc kubenswrapper[4791]: I0218 00:55:20.637544 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8fd1aa64-b534-4775-8271-e15c6d10dc5d-config\") pod \"dnsmasq-dns-7cb5889db5-v6hzs\" (UID: \"8fd1aa64-b534-4775-8271-e15c6d10dc5d\") " pod="openstack/dnsmasq-dns-7cb5889db5-v6hzs" Feb 18 00:55:20 crc kubenswrapper[4791]: I0218 00:55:20.637555 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8fd1aa64-b534-4775-8271-e15c6d10dc5d-dns-svc\") pod \"dnsmasq-dns-7cb5889db5-v6hzs\" (UID: \"8fd1aa64-b534-4775-8271-e15c6d10dc5d\") " pod="openstack/dnsmasq-dns-7cb5889db5-v6hzs" Feb 18 00:55:20 crc kubenswrapper[4791]: I0218 00:55:20.663374 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p4mzv\" (UniqueName: 
\"kubernetes.io/projected/8fd1aa64-b534-4775-8271-e15c6d10dc5d-kube-api-access-p4mzv\") pod \"dnsmasq-dns-7cb5889db5-v6hzs\" (UID: \"8fd1aa64-b534-4775-8271-e15c6d10dc5d\") " pod="openstack/dnsmasq-dns-7cb5889db5-v6hzs" Feb 18 00:55:20 crc kubenswrapper[4791]: I0218 00:55:20.702276 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7cb5889db5-v6hzs" Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.159908 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-v6hzs"] Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.299995 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"f9e1b835-c4bf-4722-b4f5-512b3439fdfb","Type":"ContainerStarted","Data":"7111594a4a0018b0ecc15627032caff79cba1790a78292c03a783d74db8d4b2a"} Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.302033 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb5889db5-v6hzs" event={"ID":"8fd1aa64-b534-4775-8271-e15c6d10dc5d","Type":"ContainerStarted","Data":"b4373ca1a50f5fa68fd64200c44bd4a32469072ba8168ca3b9d146784af525ae"} Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.304247 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"45f7687b-ba74-41f7-bb24-e34698bb35c4","Type":"ContainerStarted","Data":"1f43f42839b4214c95b4e476177077e704f24b48efa923c3849469bbe47afe51"} Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.306363 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"ebbcd631-eef9-4249-a5a0-7aeef10d5d4e","Type":"ContainerStarted","Data":"881af9d806823d18346c7ce1c56f23e96013d212e138fa5be75a38b3eb31921a"} Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.308419 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-l4g2x" event={"ID":"8ac723a9-6515-4a69-aca1-95e459bf2047","Type":"ContainerStarted","Data":"0be6bf9c906b5bec5fe1ce361477858f2b4e9be43ca0c7b84e8c22ab895515c7"} Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.308928 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-l4g2x" Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.308951 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-l4g2x" Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.310212 4791 generic.go:334] "Generic (PLEG): container finished" podID="af0c20ed-84d0-4674-8664-1de72a190f84" containerID="df1c5ecdf365e8950237384b18df878b14e5dd2d301e493f4f4c60bdda525377" exitCode=0 Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.311015 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"af0c20ed-84d0-4674-8664-1de72a190f84","Type":"ContainerDied","Data":"df1c5ecdf365e8950237384b18df878b14e5dd2d301e493f4f4c60bdda525377"} Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.340915 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=28.647988195 podStartE2EDuration="37.340895167s" podCreationTimestamp="2026-02-18 00:54:44 +0000 UTC" firstStartedPulling="2026-02-18 00:55:02.915361537 +0000 UTC m=+1244.483374707" lastFinishedPulling="2026-02-18 00:55:11.608268519 +0000 UTC m=+1253.176281679" observedRunningTime="2026-02-18 00:55:21.322255908 +0000 UTC m=+1262.890269098" 
watchObservedRunningTime="2026-02-18 00:55:21.340895167 +0000 UTC m=+1262.908908337" Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.355244 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=12.471547113 podStartE2EDuration="26.355201872s" podCreationTimestamp="2026-02-18 00:54:55 +0000 UTC" firstStartedPulling="2026-02-18 00:55:06.241957061 +0000 UTC m=+1247.809970231" lastFinishedPulling="2026-02-18 00:55:20.12561182 +0000 UTC m=+1261.693624990" observedRunningTime="2026-02-18 00:55:21.341812556 +0000 UTC m=+1262.909825726" watchObservedRunningTime="2026-02-18 00:55:21.355201872 +0000 UTC m=+1262.923215042" Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.389478 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-l4g2x" podStartSLOduration=21.481329988 podStartE2EDuration="29.389457818s" podCreationTimestamp="2026-02-18 00:54:52 +0000 UTC" firstStartedPulling="2026-02-18 00:55:03.759445712 +0000 UTC m=+1245.327458882" lastFinishedPulling="2026-02-18 00:55:11.667573512 +0000 UTC m=+1253.235586712" observedRunningTime="2026-02-18 00:55:21.387709043 +0000 UTC m=+1262.955722213" watchObservedRunningTime="2026-02-18 00:55:21.389457818 +0000 UTC m=+1262.957470998" Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.422393 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=13.457783559 podStartE2EDuration="29.422375871s" podCreationTimestamp="2026-02-18 00:54:52 +0000 UTC" firstStartedPulling="2026-02-18 00:55:04.162837465 +0000 UTC m=+1245.730850635" lastFinishedPulling="2026-02-18 00:55:20.127429777 +0000 UTC m=+1261.695442947" observedRunningTime="2026-02-18 00:55:21.417132058 +0000 UTC m=+1262.985145238" watchObservedRunningTime="2026-02-18 00:55:21.422375871 +0000 UTC m=+1262.990389051" Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.471542 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.487208 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.493122 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.493340 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.493437 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.497681 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-rqvmr" Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.544797 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.557370 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/569159b6-791b-428c-84c7-5387c17a731b-lock\") pod \"swift-storage-0\" (UID: \"569159b6-791b-428c-84c7-5387c17a731b\") " pod="openstack/swift-storage-0" Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.557429 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-a5f2597b-8afd-48ef-80b5-9fae92885ffb\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a5f2597b-8afd-48ef-80b5-9fae92885ffb\") pod \"swift-storage-0\" (UID: \"569159b6-791b-428c-84c7-5387c17a731b\") " pod="openstack/swift-storage-0" Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.557734 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/569159b6-791b-428c-84c7-5387c17a731b-cache\") pod \"swift-storage-0\" (UID: \"569159b6-791b-428c-84c7-5387c17a731b\") " pod="openstack/swift-storage-0" Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.557815 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-chssh\" (UniqueName: \"kubernetes.io/projected/569159b6-791b-428c-84c7-5387c17a731b-kube-api-access-chssh\") pod \"swift-storage-0\" (UID: \"569159b6-791b-428c-84c7-5387c17a731b\") " pod="openstack/swift-storage-0" Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.557947 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/569159b6-791b-428c-84c7-5387c17a731b-etc-swift\") pod \"swift-storage-0\" (UID: \"569159b6-791b-428c-84c7-5387c17a731b\") " pod="openstack/swift-storage-0" Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.559283 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/569159b6-791b-428c-84c7-5387c17a731b-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"569159b6-791b-428c-84c7-5387c17a731b\") " pod="openstack/swift-storage-0" Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.660734 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/569159b6-791b-428c-84c7-5387c17a731b-etc-swift\") pod \"swift-storage-0\" (UID: \"569159b6-791b-428c-84c7-5387c17a731b\") " pod="openstack/swift-storage-0" Feb 18 00:55:21 crc 
kubenswrapper[4791]: I0218 00:55:21.660786 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/569159b6-791b-428c-84c7-5387c17a731b-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"569159b6-791b-428c-84c7-5387c17a731b\") " pod="openstack/swift-storage-0" Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.660869 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/569159b6-791b-428c-84c7-5387c17a731b-lock\") pod \"swift-storage-0\" (UID: \"569159b6-791b-428c-84c7-5387c17a731b\") " pod="openstack/swift-storage-0" Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.660909 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-a5f2597b-8afd-48ef-80b5-9fae92885ffb\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a5f2597b-8afd-48ef-80b5-9fae92885ffb\") pod \"swift-storage-0\" (UID: \"569159b6-791b-428c-84c7-5387c17a731b\") " pod="openstack/swift-storage-0" Feb 18 00:55:21 crc kubenswrapper[4791]: E0218 00:55:21.660950 4791 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.660969 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/569159b6-791b-428c-84c7-5387c17a731b-cache\") pod \"swift-storage-0\" (UID: \"569159b6-791b-428c-84c7-5387c17a731b\") " pod="openstack/swift-storage-0" Feb 18 00:55:21 crc kubenswrapper[4791]: E0218 00:55:21.660972 4791 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.660991 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-chssh\" (UniqueName: \"kubernetes.io/projected/569159b6-791b-428c-84c7-5387c17a731b-kube-api-access-chssh\") pod \"swift-storage-0\" (UID: \"569159b6-791b-428c-84c7-5387c17a731b\") " pod="openstack/swift-storage-0" Feb 18 00:55:21 crc kubenswrapper[4791]: E0218 00:55:21.661028 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/569159b6-791b-428c-84c7-5387c17a731b-etc-swift podName:569159b6-791b-428c-84c7-5387c17a731b nodeName:}" failed. No retries permitted until 2026-02-18 00:55:22.161008061 +0000 UTC m=+1263.729021241 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/569159b6-791b-428c-84c7-5387c17a731b-etc-swift") pod "swift-storage-0" (UID: "569159b6-791b-428c-84c7-5387c17a731b") : configmap "swift-ring-files" not found Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.661423 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/569159b6-791b-428c-84c7-5387c17a731b-lock\") pod \"swift-storage-0\" (UID: \"569159b6-791b-428c-84c7-5387c17a731b\") " pod="openstack/swift-storage-0" Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.661803 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/569159b6-791b-428c-84c7-5387c17a731b-cache\") pod \"swift-storage-0\" (UID: \"569159b6-791b-428c-84c7-5387c17a731b\") " pod="openstack/swift-storage-0" Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.664526 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/569159b6-791b-428c-84c7-5387c17a731b-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"569159b6-791b-428c-84c7-5387c17a731b\") " pod="openstack/swift-storage-0" Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.668375 4791 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.668404 4791 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-a5f2597b-8afd-48ef-80b5-9fae92885ffb\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a5f2597b-8afd-48ef-80b5-9fae92885ffb\") pod \"swift-storage-0\" (UID: \"569159b6-791b-428c-84c7-5387c17a731b\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/c195a61b2d6dade4620651f5a53ccf1259178b8790183c03e94577ef87c32227/globalmount\"" pod="openstack/swift-storage-0" Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.682776 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-chssh\" (UniqueName: \"kubernetes.io/projected/569159b6-791b-428c-84c7-5387c17a731b-kube-api-access-chssh\") pod \"swift-storage-0\" (UID: \"569159b6-791b-428c-84c7-5387c17a731b\") " pod="openstack/swift-storage-0" Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.708274 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-a5f2597b-8afd-48ef-80b5-9fae92885ffb\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a5f2597b-8afd-48ef-80b5-9fae92885ffb\") pod \"swift-storage-0\" (UID: \"569159b6-791b-428c-84c7-5387c17a731b\") " pod="openstack/swift-storage-0" Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.944855 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-xlbf7"] Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.946731 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-xlbf7" Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.949291 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.949448 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.950784 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Feb 18 00:55:21 crc kubenswrapper[4791]: I0218 00:55:21.956963 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-xlbf7"] Feb 18 00:55:22 crc kubenswrapper[4791]: I0218 00:55:22.070092 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-ring-data-devices\") pod \"swift-ring-rebalance-xlbf7\" (UID: \"ddbbd37f-70d8-40d3-9f79-2c8172c4d589\") " pod="openstack/swift-ring-rebalance-xlbf7" Feb 18 00:55:22 crc kubenswrapper[4791]: I0218 00:55:22.070135 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-swiftconf\") pod \"swift-ring-rebalance-xlbf7\" (UID: \"ddbbd37f-70d8-40d3-9f79-2c8172c4d589\") " pod="openstack/swift-ring-rebalance-xlbf7" Feb 18 00:55:22 crc kubenswrapper[4791]: I0218 00:55:22.070170 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-scripts\") pod \"swift-ring-rebalance-xlbf7\" (UID: \"ddbbd37f-70d8-40d3-9f79-2c8172c4d589\") " pod="openstack/swift-ring-rebalance-xlbf7" Feb 18 00:55:22 crc kubenswrapper[4791]: I0218 00:55:22.070208 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-etc-swift\") pod \"swift-ring-rebalance-xlbf7\" (UID: \"ddbbd37f-70d8-40d3-9f79-2c8172c4d589\") " pod="openstack/swift-ring-rebalance-xlbf7" Feb 18 00:55:22 crc kubenswrapper[4791]: I0218 00:55:22.070286 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bm99b\" (UniqueName: \"kubernetes.io/projected/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-kube-api-access-bm99b\") pod \"swift-ring-rebalance-xlbf7\" (UID: \"ddbbd37f-70d8-40d3-9f79-2c8172c4d589\") " pod="openstack/swift-ring-rebalance-xlbf7" Feb 18 00:55:22 crc kubenswrapper[4791]: I0218 00:55:22.070373 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-combined-ca-bundle\") pod \"swift-ring-rebalance-xlbf7\" (UID: \"ddbbd37f-70d8-40d3-9f79-2c8172c4d589\") " pod="openstack/swift-ring-rebalance-xlbf7" Feb 18 00:55:22 crc kubenswrapper[4791]: I0218 00:55:22.070398 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-dispersionconf\") pod \"swift-ring-rebalance-xlbf7\" (UID: \"ddbbd37f-70d8-40d3-9f79-2c8172c4d589\") " pod="openstack/swift-ring-rebalance-xlbf7" Feb 18 
00:55:22 crc kubenswrapper[4791]: I0218 00:55:22.130833 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Feb 18 00:55:22 crc kubenswrapper[4791]: I0218 00:55:22.172732 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-ring-data-devices\") pod \"swift-ring-rebalance-xlbf7\" (UID: \"ddbbd37f-70d8-40d3-9f79-2c8172c4d589\") " pod="openstack/swift-ring-rebalance-xlbf7" Feb 18 00:55:22 crc kubenswrapper[4791]: I0218 00:55:22.172788 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-swiftconf\") pod \"swift-ring-rebalance-xlbf7\" (UID: \"ddbbd37f-70d8-40d3-9f79-2c8172c4d589\") " pod="openstack/swift-ring-rebalance-xlbf7" Feb 18 00:55:22 crc kubenswrapper[4791]: I0218 00:55:22.172811 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-scripts\") pod \"swift-ring-rebalance-xlbf7\" (UID: \"ddbbd37f-70d8-40d3-9f79-2c8172c4d589\") " pod="openstack/swift-ring-rebalance-xlbf7" Feb 18 00:55:22 crc kubenswrapper[4791]: I0218 00:55:22.172836 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-etc-swift\") pod \"swift-ring-rebalance-xlbf7\" (UID: \"ddbbd37f-70d8-40d3-9f79-2c8172c4d589\") " pod="openstack/swift-ring-rebalance-xlbf7" Feb 18 00:55:22 crc kubenswrapper[4791]: I0218 00:55:22.173003 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bm99b\" (UniqueName: \"kubernetes.io/projected/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-kube-api-access-bm99b\") pod \"swift-ring-rebalance-xlbf7\" (UID: \"ddbbd37f-70d8-40d3-9f79-2c8172c4d589\") " pod="openstack/swift-ring-rebalance-xlbf7" Feb 18 00:55:22 crc kubenswrapper[4791]: I0218 00:55:22.173111 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/569159b6-791b-428c-84c7-5387c17a731b-etc-swift\") pod \"swift-storage-0\" (UID: \"569159b6-791b-428c-84c7-5387c17a731b\") " pod="openstack/swift-storage-0" Feb 18 00:55:22 crc kubenswrapper[4791]: I0218 00:55:22.173216 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-combined-ca-bundle\") pod \"swift-ring-rebalance-xlbf7\" (UID: \"ddbbd37f-70d8-40d3-9f79-2c8172c4d589\") " pod="openstack/swift-ring-rebalance-xlbf7" Feb 18 00:55:22 crc kubenswrapper[4791]: I0218 00:55:22.173271 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-dispersionconf\") pod \"swift-ring-rebalance-xlbf7\" (UID: \"ddbbd37f-70d8-40d3-9f79-2c8172c4d589\") " pod="openstack/swift-ring-rebalance-xlbf7" Feb 18 00:55:22 crc kubenswrapper[4791]: E0218 00:55:22.173554 4791 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Feb 18 00:55:22 crc kubenswrapper[4791]: E0218 00:55:22.173582 4791 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap 
"swift-ring-files" not found Feb 18 00:55:22 crc kubenswrapper[4791]: E0218 00:55:22.173620 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/569159b6-791b-428c-84c7-5387c17a731b-etc-swift podName:569159b6-791b-428c-84c7-5387c17a731b nodeName:}" failed. No retries permitted until 2026-02-18 00:55:23.17360465 +0000 UTC m=+1264.741617820 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/569159b6-791b-428c-84c7-5387c17a731b-etc-swift") pod "swift-storage-0" (UID: "569159b6-791b-428c-84c7-5387c17a731b") : configmap "swift-ring-files" not found Feb 18 00:55:22 crc kubenswrapper[4791]: I0218 00:55:22.175073 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-ring-data-devices\") pod \"swift-ring-rebalance-xlbf7\" (UID: \"ddbbd37f-70d8-40d3-9f79-2c8172c4d589\") " pod="openstack/swift-ring-rebalance-xlbf7" Feb 18 00:55:22 crc kubenswrapper[4791]: I0218 00:55:22.175279 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-etc-swift\") pod \"swift-ring-rebalance-xlbf7\" (UID: \"ddbbd37f-70d8-40d3-9f79-2c8172c4d589\") " pod="openstack/swift-ring-rebalance-xlbf7" Feb 18 00:55:22 crc kubenswrapper[4791]: I0218 00:55:22.175551 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-scripts\") pod \"swift-ring-rebalance-xlbf7\" (UID: \"ddbbd37f-70d8-40d3-9f79-2c8172c4d589\") " pod="openstack/swift-ring-rebalance-xlbf7" Feb 18 00:55:22 crc kubenswrapper[4791]: I0218 00:55:22.178508 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-combined-ca-bundle\") pod \"swift-ring-rebalance-xlbf7\" (UID: \"ddbbd37f-70d8-40d3-9f79-2c8172c4d589\") " pod="openstack/swift-ring-rebalance-xlbf7" Feb 18 00:55:22 crc kubenswrapper[4791]: I0218 00:55:22.178799 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-swiftconf\") pod \"swift-ring-rebalance-xlbf7\" (UID: \"ddbbd37f-70d8-40d3-9f79-2c8172c4d589\") " pod="openstack/swift-ring-rebalance-xlbf7" Feb 18 00:55:22 crc kubenswrapper[4791]: I0218 00:55:22.179541 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-dispersionconf\") pod \"swift-ring-rebalance-xlbf7\" (UID: \"ddbbd37f-70d8-40d3-9f79-2c8172c4d589\") " pod="openstack/swift-ring-rebalance-xlbf7" Feb 18 00:55:22 crc kubenswrapper[4791]: I0218 00:55:22.189835 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bm99b\" (UniqueName: \"kubernetes.io/projected/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-kube-api-access-bm99b\") pod \"swift-ring-rebalance-xlbf7\" (UID: \"ddbbd37f-70d8-40d3-9f79-2c8172c4d589\") " pod="openstack/swift-ring-rebalance-xlbf7" Feb 18 00:55:22 crc kubenswrapper[4791]: I0218 00:55:22.266925 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-xlbf7" Feb 18 00:55:22 crc kubenswrapper[4791]: W0218 00:55:22.727360 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podddbbd37f_70d8_40d3_9f79_2c8172c4d589.slice/crio-3d03b6c3c7ac56e6a4c24d30c26534f230e745ed40d6286c3f0b5c3e9649faed WatchSource:0}: Error finding container 3d03b6c3c7ac56e6a4c24d30c26534f230e745ed40d6286c3f0b5c3e9649faed: Status 404 returned error can't find the container with id 3d03b6c3c7ac56e6a4c24d30c26534f230e745ed40d6286c3f0b5c3e9649faed Feb 18 00:55:22 crc kubenswrapper[4791]: I0218 00:55:22.728028 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-xlbf7"] Feb 18 00:55:23 crc kubenswrapper[4791]: I0218 00:55:23.191869 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/569159b6-791b-428c-84c7-5387c17a731b-etc-swift\") pod \"swift-storage-0\" (UID: \"569159b6-791b-428c-84c7-5387c17a731b\") " pod="openstack/swift-storage-0" Feb 18 00:55:23 crc kubenswrapper[4791]: E0218 00:55:23.192107 4791 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Feb 18 00:55:23 crc kubenswrapper[4791]: E0218 00:55:23.192371 4791 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Feb 18 00:55:23 crc kubenswrapper[4791]: E0218 00:55:23.192464 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/569159b6-791b-428c-84c7-5387c17a731b-etc-swift podName:569159b6-791b-428c-84c7-5387c17a731b nodeName:}" failed. No retries permitted until 2026-02-18 00:55:25.192436128 +0000 UTC m=+1266.760449338 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/569159b6-791b-428c-84c7-5387c17a731b-etc-swift") pod "swift-storage-0" (UID: "569159b6-791b-428c-84c7-5387c17a731b") : configmap "swift-ring-files" not found Feb 18 00:55:23 crc kubenswrapper[4791]: I0218 00:55:23.333899 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-xlbf7" event={"ID":"ddbbd37f-70d8-40d3-9f79-2c8172c4d589","Type":"ContainerStarted","Data":"3d03b6c3c7ac56e6a4c24d30c26534f230e745ed40d6286c3f0b5c3e9649faed"} Feb 18 00:55:23 crc kubenswrapper[4791]: I0218 00:55:23.336263 4791 generic.go:334] "Generic (PLEG): container finished" podID="8fd1aa64-b534-4775-8271-e15c6d10dc5d" containerID="8a8746faf0b9e85aa5918c69a896e14622f033a6c3ddd9f7fbc46a025cfeb3d8" exitCode=0 Feb 18 00:55:23 crc kubenswrapper[4791]: I0218 00:55:23.336330 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb5889db5-v6hzs" event={"ID":"8fd1aa64-b534-4775-8271-e15c6d10dc5d","Type":"ContainerDied","Data":"8a8746faf0b9e85aa5918c69a896e14622f033a6c3ddd9f7fbc46a025cfeb3d8"} Feb 18 00:55:23 crc kubenswrapper[4791]: I0218 00:55:23.340698 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"af0c20ed-84d0-4674-8664-1de72a190f84","Type":"ContainerStarted","Data":"20a1e4485774947839eba38e9fcc296f69f4898a7633264713f1e28f5e1c8055"} Feb 18 00:55:23 crc kubenswrapper[4791]: I0218 00:55:23.397556 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=25.903365852 podStartE2EDuration="37.397540865s" podCreationTimestamp="2026-02-18 00:54:46 +0000 UTC" firstStartedPulling="2026-02-18 00:54:59.805623324 +0000 UTC m=+1241.373636494" lastFinishedPulling="2026-02-18 00:55:11.299798327 +0000 UTC m=+1252.867811507" observedRunningTime="2026-02-18 00:55:23.394916964 +0000 UTC m=+1264.962930204" watchObservedRunningTime="2026-02-18 00:55:23.397540865 +0000 UTC m=+1264.965554035" Feb 18 00:55:23 crc kubenswrapper[4791]: I0218 00:55:23.658715 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Feb 18 00:55:23 crc kubenswrapper[4791]: I0218 00:55:23.659023 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Feb 18 00:55:23 crc kubenswrapper[4791]: I0218 00:55:23.703273 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.131348 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.187076 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.376033 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb5889db5-v6hzs" event={"ID":"8fd1aa64-b534-4775-8271-e15c6d10dc5d","Type":"ContainerStarted","Data":"2264c49289721c7bd3530b73579816924aa6f1ec517e134c7e2acd402bd3eaa6"} Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.405408 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7cb5889db5-v6hzs" podStartSLOduration=4.405391472 podStartE2EDuration="4.405391472s" podCreationTimestamp="2026-02-18 00:55:20 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:55:24.40370252 +0000 UTC m=+1265.971715690" watchObservedRunningTime="2026-02-18 00:55:24.405391472 +0000 UTC m=+1265.973404642" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.432903 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.448771 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.683852 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-v6hzs"] Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.730635 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-74f6f696b9-7k67t"] Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.732342 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74f6f696b9-7k67t" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.734756 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.739458 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-l7hr6"] Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.741055 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-l7hr6" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.748849 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.760781 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-74f6f696b9-7k67t"] Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.776529 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-l7hr6"] Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.835400 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wj2wp\" (UniqueName: \"kubernetes.io/projected/b959be4d-fe96-46d6-bf53-c76a3fbd2647-kube-api-access-wj2wp\") pod \"ovn-controller-metrics-l7hr6\" (UID: \"b959be4d-fe96-46d6-bf53-c76a3fbd2647\") " pod="openstack/ovn-controller-metrics-l7hr6" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.835452 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b959be4d-fe96-46d6-bf53-c76a3fbd2647-config\") pod \"ovn-controller-metrics-l7hr6\" (UID: \"b959be4d-fe96-46d6-bf53-c76a3fbd2647\") " pod="openstack/ovn-controller-metrics-l7hr6" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.835486 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/420f17be-f676-4f31-8d07-e206a9313e12-ovsdbserver-nb\") pod \"dnsmasq-dns-74f6f696b9-7k67t\" (UID: \"420f17be-f676-4f31-8d07-e206a9313e12\") " pod="openstack/dnsmasq-dns-74f6f696b9-7k67t" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.835529 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/420f17be-f676-4f31-8d07-e206a9313e12-config\") pod \"dnsmasq-dns-74f6f696b9-7k67t\" (UID: \"420f17be-f676-4f31-8d07-e206a9313e12\") " pod="openstack/dnsmasq-dns-74f6f696b9-7k67t" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.835548 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7p74p\" (UniqueName: \"kubernetes.io/projected/420f17be-f676-4f31-8d07-e206a9313e12-kube-api-access-7p74p\") pod \"dnsmasq-dns-74f6f696b9-7k67t\" (UID: \"420f17be-f676-4f31-8d07-e206a9313e12\") " pod="openstack/dnsmasq-dns-74f6f696b9-7k67t" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.835591 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/b959be4d-fe96-46d6-bf53-c76a3fbd2647-ovn-rundir\") pod \"ovn-controller-metrics-l7hr6\" (UID: \"b959be4d-fe96-46d6-bf53-c76a3fbd2647\") " pod="openstack/ovn-controller-metrics-l7hr6" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.835616 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/b959be4d-fe96-46d6-bf53-c76a3fbd2647-ovs-rundir\") pod \"ovn-controller-metrics-l7hr6\" (UID: \"b959be4d-fe96-46d6-bf53-c76a3fbd2647\") " pod="openstack/ovn-controller-metrics-l7hr6" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.835632 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b959be4d-fe96-46d6-bf53-c76a3fbd2647-combined-ca-bundle\") pod \"ovn-controller-metrics-l7hr6\" (UID: \"b959be4d-fe96-46d6-bf53-c76a3fbd2647\") " pod="openstack/ovn-controller-metrics-l7hr6" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.835688 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/b959be4d-fe96-46d6-bf53-c76a3fbd2647-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-l7hr6\" (UID: \"b959be4d-fe96-46d6-bf53-c76a3fbd2647\") " pod="openstack/ovn-controller-metrics-l7hr6" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.835705 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/420f17be-f676-4f31-8d07-e206a9313e12-dns-svc\") pod \"dnsmasq-dns-74f6f696b9-7k67t\" (UID: \"420f17be-f676-4f31-8d07-e206a9313e12\") " pod="openstack/dnsmasq-dns-74f6f696b9-7k67t" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.863814 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-74f6f696b9-7k67t"] Feb 18 00:55:24 crc kubenswrapper[4791]: E0218 00:55:24.866520 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[config dns-svc kube-api-access-7p74p ovsdbserver-nb], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/dnsmasq-dns-74f6f696b9-7k67t" podUID="420f17be-f676-4f31-8d07-e206a9313e12" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.898047 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-698758b865-nkd6n"] Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.899675 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-nkd6n" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.907791 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.918306 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-nkd6n"] Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.941517 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/b959be4d-fe96-46d6-bf53-c76a3fbd2647-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-l7hr6\" (UID: \"b959be4d-fe96-46d6-bf53-c76a3fbd2647\") " pod="openstack/ovn-controller-metrics-l7hr6" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.941562 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/420f17be-f676-4f31-8d07-e206a9313e12-dns-svc\") pod \"dnsmasq-dns-74f6f696b9-7k67t\" (UID: \"420f17be-f676-4f31-8d07-e206a9313e12\") " pod="openstack/dnsmasq-dns-74f6f696b9-7k67t" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.941643 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wj2wp\" (UniqueName: \"kubernetes.io/projected/b959be4d-fe96-46d6-bf53-c76a3fbd2647-kube-api-access-wj2wp\") pod \"ovn-controller-metrics-l7hr6\" (UID: \"b959be4d-fe96-46d6-bf53-c76a3fbd2647\") " pod="openstack/ovn-controller-metrics-l7hr6" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.941674 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b959be4d-fe96-46d6-bf53-c76a3fbd2647-config\") pod \"ovn-controller-metrics-l7hr6\" (UID: \"b959be4d-fe96-46d6-bf53-c76a3fbd2647\") " pod="openstack/ovn-controller-metrics-l7hr6" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.941713 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/420f17be-f676-4f31-8d07-e206a9313e12-ovsdbserver-nb\") pod \"dnsmasq-dns-74f6f696b9-7k67t\" (UID: \"420f17be-f676-4f31-8d07-e206a9313e12\") " pod="openstack/dnsmasq-dns-74f6f696b9-7k67t" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.941771 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/420f17be-f676-4f31-8d07-e206a9313e12-config\") pod \"dnsmasq-dns-74f6f696b9-7k67t\" (UID: \"420f17be-f676-4f31-8d07-e206a9313e12\") " pod="openstack/dnsmasq-dns-74f6f696b9-7k67t" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.941792 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7p74p\" (UniqueName: \"kubernetes.io/projected/420f17be-f676-4f31-8d07-e206a9313e12-kube-api-access-7p74p\") pod \"dnsmasq-dns-74f6f696b9-7k67t\" (UID: \"420f17be-f676-4f31-8d07-e206a9313e12\") " pod="openstack/dnsmasq-dns-74f6f696b9-7k67t" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.941852 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/b959be4d-fe96-46d6-bf53-c76a3fbd2647-ovn-rundir\") pod \"ovn-controller-metrics-l7hr6\" (UID: \"b959be4d-fe96-46d6-bf53-c76a3fbd2647\") " pod="openstack/ovn-controller-metrics-l7hr6" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 
00:55:24.941885 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/b959be4d-fe96-46d6-bf53-c76a3fbd2647-ovs-rundir\") pod \"ovn-controller-metrics-l7hr6\" (UID: \"b959be4d-fe96-46d6-bf53-c76a3fbd2647\") " pod="openstack/ovn-controller-metrics-l7hr6" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.941908 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b959be4d-fe96-46d6-bf53-c76a3fbd2647-combined-ca-bundle\") pod \"ovn-controller-metrics-l7hr6\" (UID: \"b959be4d-fe96-46d6-bf53-c76a3fbd2647\") " pod="openstack/ovn-controller-metrics-l7hr6" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.944141 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/420f17be-f676-4f31-8d07-e206a9313e12-ovsdbserver-nb\") pod \"dnsmasq-dns-74f6f696b9-7k67t\" (UID: \"420f17be-f676-4f31-8d07-e206a9313e12\") " pod="openstack/dnsmasq-dns-74f6f696b9-7k67t" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.945195 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/420f17be-f676-4f31-8d07-e206a9313e12-config\") pod \"dnsmasq-dns-74f6f696b9-7k67t\" (UID: \"420f17be-f676-4f31-8d07-e206a9313e12\") " pod="openstack/dnsmasq-dns-74f6f696b9-7k67t" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.944170 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/420f17be-f676-4f31-8d07-e206a9313e12-dns-svc\") pod \"dnsmasq-dns-74f6f696b9-7k67t\" (UID: \"420f17be-f676-4f31-8d07-e206a9313e12\") " pod="openstack/dnsmasq-dns-74f6f696b9-7k67t" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.945484 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/b959be4d-fe96-46d6-bf53-c76a3fbd2647-ovn-rundir\") pod \"ovn-controller-metrics-l7hr6\" (UID: \"b959be4d-fe96-46d6-bf53-c76a3fbd2647\") " pod="openstack/ovn-controller-metrics-l7hr6" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.945514 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/b959be4d-fe96-46d6-bf53-c76a3fbd2647-ovs-rundir\") pod \"ovn-controller-metrics-l7hr6\" (UID: \"b959be4d-fe96-46d6-bf53-c76a3fbd2647\") " pod="openstack/ovn-controller-metrics-l7hr6" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.946716 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b959be4d-fe96-46d6-bf53-c76a3fbd2647-config\") pod \"ovn-controller-metrics-l7hr6\" (UID: \"b959be4d-fe96-46d6-bf53-c76a3fbd2647\") " pod="openstack/ovn-controller-metrics-l7hr6" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.959008 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b959be4d-fe96-46d6-bf53-c76a3fbd2647-combined-ca-bundle\") pod \"ovn-controller-metrics-l7hr6\" (UID: \"b959be4d-fe96-46d6-bf53-c76a3fbd2647\") " pod="openstack/ovn-controller-metrics-l7hr6" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.960632 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/b959be4d-fe96-46d6-bf53-c76a3fbd2647-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-l7hr6\" (UID: \"b959be4d-fe96-46d6-bf53-c76a3fbd2647\") " pod="openstack/ovn-controller-metrics-l7hr6" Feb 18 00:55:24 crc kubenswrapper[4791]: I0218 00:55:24.981277 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7p74p\" (UniqueName: \"kubernetes.io/projected/420f17be-f676-4f31-8d07-e206a9313e12-kube-api-access-7p74p\") pod \"dnsmasq-dns-74f6f696b9-7k67t\" (UID: \"420f17be-f676-4f31-8d07-e206a9313e12\") " pod="openstack/dnsmasq-dns-74f6f696b9-7k67t" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.010820 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wj2wp\" (UniqueName: \"kubernetes.io/projected/b959be4d-fe96-46d6-bf53-c76a3fbd2647-kube-api-access-wj2wp\") pod \"ovn-controller-metrics-l7hr6\" (UID: \"b959be4d-fe96-46d6-bf53-c76a3fbd2647\") " pod="openstack/ovn-controller-metrics-l7hr6" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.043715 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xqblt\" (UniqueName: \"kubernetes.io/projected/6eb117b6-f49c-4bc7-a59e-50c32713d4a2-kube-api-access-xqblt\") pod \"dnsmasq-dns-698758b865-nkd6n\" (UID: \"6eb117b6-f49c-4bc7-a59e-50c32713d4a2\") " pod="openstack/dnsmasq-dns-698758b865-nkd6n" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.043818 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6eb117b6-f49c-4bc7-a59e-50c32713d4a2-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-nkd6n\" (UID: \"6eb117b6-f49c-4bc7-a59e-50c32713d4a2\") " pod="openstack/dnsmasq-dns-698758b865-nkd6n" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.043844 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6eb117b6-f49c-4bc7-a59e-50c32713d4a2-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-nkd6n\" (UID: \"6eb117b6-f49c-4bc7-a59e-50c32713d4a2\") " pod="openstack/dnsmasq-dns-698758b865-nkd6n" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.043897 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6eb117b6-f49c-4bc7-a59e-50c32713d4a2-config\") pod \"dnsmasq-dns-698758b865-nkd6n\" (UID: \"6eb117b6-f49c-4bc7-a59e-50c32713d4a2\") " pod="openstack/dnsmasq-dns-698758b865-nkd6n" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.043972 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6eb117b6-f49c-4bc7-a59e-50c32713d4a2-dns-svc\") pod \"dnsmasq-dns-698758b865-nkd6n\" (UID: \"6eb117b6-f49c-4bc7-a59e-50c32713d4a2\") " pod="openstack/dnsmasq-dns-698758b865-nkd6n" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.045870 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.047483 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.056611 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.056874 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.062411 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-v645g" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.062579 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.076996 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.079643 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-l7hr6" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.146319 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5580709-c614-4e19-a6c2-58f2ea044e0e-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"d5580709-c614-4e19-a6c2-58f2ea044e0e\") " pod="openstack/ovn-northd-0" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.146372 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xqblt\" (UniqueName: \"kubernetes.io/projected/6eb117b6-f49c-4bc7-a59e-50c32713d4a2-kube-api-access-xqblt\") pod \"dnsmasq-dns-698758b865-nkd6n\" (UID: \"6eb117b6-f49c-4bc7-a59e-50c32713d4a2\") " pod="openstack/dnsmasq-dns-698758b865-nkd6n" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.146402 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5580709-c614-4e19-a6c2-58f2ea044e0e-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"d5580709-c614-4e19-a6c2-58f2ea044e0e\") " pod="openstack/ovn-northd-0" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.146432 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5580709-c614-4e19-a6c2-58f2ea044e0e-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"d5580709-c614-4e19-a6c2-58f2ea044e0e\") " pod="openstack/ovn-northd-0" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.146486 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6eb117b6-f49c-4bc7-a59e-50c32713d4a2-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-nkd6n\" (UID: \"6eb117b6-f49c-4bc7-a59e-50c32713d4a2\") " pod="openstack/dnsmasq-dns-698758b865-nkd6n" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.146506 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6eb117b6-f49c-4bc7-a59e-50c32713d4a2-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-nkd6n\" (UID: \"6eb117b6-f49c-4bc7-a59e-50c32713d4a2\") " pod="openstack/dnsmasq-dns-698758b865-nkd6n" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.146554 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"config\" (UniqueName: \"kubernetes.io/configmap/d5580709-c614-4e19-a6c2-58f2ea044e0e-config\") pod \"ovn-northd-0\" (UID: \"d5580709-c614-4e19-a6c2-58f2ea044e0e\") " pod="openstack/ovn-northd-0" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.146578 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vcg7v\" (UniqueName: \"kubernetes.io/projected/d5580709-c614-4e19-a6c2-58f2ea044e0e-kube-api-access-vcg7v\") pod \"ovn-northd-0\" (UID: \"d5580709-c614-4e19-a6c2-58f2ea044e0e\") " pod="openstack/ovn-northd-0" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.146595 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6eb117b6-f49c-4bc7-a59e-50c32713d4a2-config\") pod \"dnsmasq-dns-698758b865-nkd6n\" (UID: \"6eb117b6-f49c-4bc7-a59e-50c32713d4a2\") " pod="openstack/dnsmasq-dns-698758b865-nkd6n" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.146615 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d5580709-c614-4e19-a6c2-58f2ea044e0e-scripts\") pod \"ovn-northd-0\" (UID: \"d5580709-c614-4e19-a6c2-58f2ea044e0e\") " pod="openstack/ovn-northd-0" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.146683 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6eb117b6-f49c-4bc7-a59e-50c32713d4a2-dns-svc\") pod \"dnsmasq-dns-698758b865-nkd6n\" (UID: \"6eb117b6-f49c-4bc7-a59e-50c32713d4a2\") " pod="openstack/dnsmasq-dns-698758b865-nkd6n" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.146708 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d5580709-c614-4e19-a6c2-58f2ea044e0e-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"d5580709-c614-4e19-a6c2-58f2ea044e0e\") " pod="openstack/ovn-northd-0" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.153744 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6eb117b6-f49c-4bc7-a59e-50c32713d4a2-config\") pod \"dnsmasq-dns-698758b865-nkd6n\" (UID: \"6eb117b6-f49c-4bc7-a59e-50c32713d4a2\") " pod="openstack/dnsmasq-dns-698758b865-nkd6n" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.154498 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6eb117b6-f49c-4bc7-a59e-50c32713d4a2-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-nkd6n\" (UID: \"6eb117b6-f49c-4bc7-a59e-50c32713d4a2\") " pod="openstack/dnsmasq-dns-698758b865-nkd6n" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.157484 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6eb117b6-f49c-4bc7-a59e-50c32713d4a2-dns-svc\") pod \"dnsmasq-dns-698758b865-nkd6n\" (UID: \"6eb117b6-f49c-4bc7-a59e-50c32713d4a2\") " pod="openstack/dnsmasq-dns-698758b865-nkd6n" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.158144 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6eb117b6-f49c-4bc7-a59e-50c32713d4a2-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-nkd6n\" (UID: \"6eb117b6-f49c-4bc7-a59e-50c32713d4a2\") " 
pod="openstack/dnsmasq-dns-698758b865-nkd6n" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.216504 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xqblt\" (UniqueName: \"kubernetes.io/projected/6eb117b6-f49c-4bc7-a59e-50c32713d4a2-kube-api-access-xqblt\") pod \"dnsmasq-dns-698758b865-nkd6n\" (UID: \"6eb117b6-f49c-4bc7-a59e-50c32713d4a2\") " pod="openstack/dnsmasq-dns-698758b865-nkd6n" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.225790 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-nkd6n" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.287429 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vcg7v\" (UniqueName: \"kubernetes.io/projected/d5580709-c614-4e19-a6c2-58f2ea044e0e-kube-api-access-vcg7v\") pod \"ovn-northd-0\" (UID: \"d5580709-c614-4e19-a6c2-58f2ea044e0e\") " pod="openstack/ovn-northd-0" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.287498 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d5580709-c614-4e19-a6c2-58f2ea044e0e-scripts\") pod \"ovn-northd-0\" (UID: \"d5580709-c614-4e19-a6c2-58f2ea044e0e\") " pod="openstack/ovn-northd-0" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.287676 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d5580709-c614-4e19-a6c2-58f2ea044e0e-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"d5580709-c614-4e19-a6c2-58f2ea044e0e\") " pod="openstack/ovn-northd-0" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.287819 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/569159b6-791b-428c-84c7-5387c17a731b-etc-swift\") pod \"swift-storage-0\" (UID: \"569159b6-791b-428c-84c7-5387c17a731b\") " pod="openstack/swift-storage-0" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.287843 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5580709-c614-4e19-a6c2-58f2ea044e0e-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"d5580709-c614-4e19-a6c2-58f2ea044e0e\") " pod="openstack/ovn-northd-0" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.287897 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5580709-c614-4e19-a6c2-58f2ea044e0e-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"d5580709-c614-4e19-a6c2-58f2ea044e0e\") " pod="openstack/ovn-northd-0" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.287971 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5580709-c614-4e19-a6c2-58f2ea044e0e-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"d5580709-c614-4e19-a6c2-58f2ea044e0e\") " pod="openstack/ovn-northd-0" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.288091 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d5580709-c614-4e19-a6c2-58f2ea044e0e-config\") pod \"ovn-northd-0\" (UID: \"d5580709-c614-4e19-a6c2-58f2ea044e0e\") " pod="openstack/ovn-northd-0" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.290140 4791 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d5580709-c614-4e19-a6c2-58f2ea044e0e-config\") pod \"ovn-northd-0\" (UID: \"d5580709-c614-4e19-a6c2-58f2ea044e0e\") " pod="openstack/ovn-northd-0" Feb 18 00:55:25 crc kubenswrapper[4791]: E0218 00:55:25.292410 4791 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Feb 18 00:55:25 crc kubenswrapper[4791]: E0218 00:55:25.304383 4791 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Feb 18 00:55:25 crc kubenswrapper[4791]: E0218 00:55:25.304443 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/569159b6-791b-428c-84c7-5387c17a731b-etc-swift podName:569159b6-791b-428c-84c7-5387c17a731b nodeName:}" failed. No retries permitted until 2026-02-18 00:55:29.304424357 +0000 UTC m=+1270.872437527 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/569159b6-791b-428c-84c7-5387c17a731b-etc-swift") pod "swift-storage-0" (UID: "569159b6-791b-428c-84c7-5387c17a731b") : configmap "swift-ring-files" not found Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.292896 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d5580709-c614-4e19-a6c2-58f2ea044e0e-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"d5580709-c614-4e19-a6c2-58f2ea044e0e\") " pod="openstack/ovn-northd-0" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.292884 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5580709-c614-4e19-a6c2-58f2ea044e0e-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"d5580709-c614-4e19-a6c2-58f2ea044e0e\") " pod="openstack/ovn-northd-0" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.297866 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5580709-c614-4e19-a6c2-58f2ea044e0e-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"d5580709-c614-4e19-a6c2-58f2ea044e0e\") " pod="openstack/ovn-northd-0" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.293054 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d5580709-c614-4e19-a6c2-58f2ea044e0e-scripts\") pod \"ovn-northd-0\" (UID: \"d5580709-c614-4e19-a6c2-58f2ea044e0e\") " pod="openstack/ovn-northd-0" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.292794 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5580709-c614-4e19-a6c2-58f2ea044e0e-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"d5580709-c614-4e19-a6c2-58f2ea044e0e\") " pod="openstack/ovn-northd-0" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.325453 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vcg7v\" (UniqueName: \"kubernetes.io/projected/d5580709-c614-4e19-a6c2-58f2ea044e0e-kube-api-access-vcg7v\") pod \"ovn-northd-0\" (UID: \"d5580709-c614-4e19-a6c2-58f2ea044e0e\") " pod="openstack/ovn-northd-0" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.384923 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-74f6f696b9-7k67t" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.385831 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7cb5889db5-v6hzs" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.389313 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.401198 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74f6f696b9-7k67t" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.491704 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/420f17be-f676-4f31-8d07-e206a9313e12-config\") pod \"420f17be-f676-4f31-8d07-e206a9313e12\" (UID: \"420f17be-f676-4f31-8d07-e206a9313e12\") " Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.491855 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/420f17be-f676-4f31-8d07-e206a9313e12-ovsdbserver-nb\") pod \"420f17be-f676-4f31-8d07-e206a9313e12\" (UID: \"420f17be-f676-4f31-8d07-e206a9313e12\") " Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.492028 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/420f17be-f676-4f31-8d07-e206a9313e12-dns-svc\") pod \"420f17be-f676-4f31-8d07-e206a9313e12\" (UID: \"420f17be-f676-4f31-8d07-e206a9313e12\") " Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.492064 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7p74p\" (UniqueName: \"kubernetes.io/projected/420f17be-f676-4f31-8d07-e206a9313e12-kube-api-access-7p74p\") pod \"420f17be-f676-4f31-8d07-e206a9313e12\" (UID: \"420f17be-f676-4f31-8d07-e206a9313e12\") " Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.492142 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/420f17be-f676-4f31-8d07-e206a9313e12-config" (OuterVolumeSpecName: "config") pod "420f17be-f676-4f31-8d07-e206a9313e12" (UID: "420f17be-f676-4f31-8d07-e206a9313e12"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.492347 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/420f17be-f676-4f31-8d07-e206a9313e12-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "420f17be-f676-4f31-8d07-e206a9313e12" (UID: "420f17be-f676-4f31-8d07-e206a9313e12"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.492473 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/420f17be-f676-4f31-8d07-e206a9313e12-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "420f17be-f676-4f31-8d07-e206a9313e12" (UID: "420f17be-f676-4f31-8d07-e206a9313e12"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.494573 4791 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/420f17be-f676-4f31-8d07-e206a9313e12-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.494593 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/420f17be-f676-4f31-8d07-e206a9313e12-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.494603 4791 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/420f17be-f676-4f31-8d07-e206a9313e12-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.495183 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/420f17be-f676-4f31-8d07-e206a9313e12-kube-api-access-7p74p" (OuterVolumeSpecName: "kube-api-access-7p74p") pod "420f17be-f676-4f31-8d07-e206a9313e12" (UID: "420f17be-f676-4f31-8d07-e206a9313e12"). InnerVolumeSpecName "kube-api-access-7p74p". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:55:25 crc kubenswrapper[4791]: I0218 00:55:25.597280 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7p74p\" (UniqueName: \"kubernetes.io/projected/420f17be-f676-4f31-8d07-e206a9313e12-kube-api-access-7p74p\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:26 crc kubenswrapper[4791]: I0218 00:55:26.234485 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Feb 18 00:55:26 crc kubenswrapper[4791]: I0218 00:55:26.235189 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Feb 18 00:55:26 crc kubenswrapper[4791]: I0218 00:55:26.399991 4791 generic.go:334] "Generic (PLEG): container finished" podID="1c3406dc-1b5d-4376-8b80-b55720c15091" containerID="765528daa15c40187620b57eeeec366bd55c2183900753d58ea1126db14d6915" exitCode=0 Feb 18 00:55:26 crc kubenswrapper[4791]: I0218 00:55:26.400068 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"1c3406dc-1b5d-4376-8b80-b55720c15091","Type":"ContainerDied","Data":"765528daa15c40187620b57eeeec366bd55c2183900753d58ea1126db14d6915"} Feb 18 00:55:26 crc kubenswrapper[4791]: I0218 00:55:26.400300 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-74f6f696b9-7k67t" Feb 18 00:55:26 crc kubenswrapper[4791]: I0218 00:55:26.400299 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7cb5889db5-v6hzs" podUID="8fd1aa64-b534-4775-8271-e15c6d10dc5d" containerName="dnsmasq-dns" containerID="cri-o://2264c49289721c7bd3530b73579816924aa6f1ec517e134c7e2acd402bd3eaa6" gracePeriod=10 Feb 18 00:55:26 crc kubenswrapper[4791]: I0218 00:55:26.479654 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-74f6f696b9-7k67t"] Feb 18 00:55:26 crc kubenswrapper[4791]: I0218 00:55:26.491273 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-74f6f696b9-7k67t"] Feb 18 00:55:26 crc kubenswrapper[4791]: I0218 00:55:26.799520 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 00:55:26 crc kubenswrapper[4791]: I0218 00:55:26.799888 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 00:55:26 crc kubenswrapper[4791]: I0218 00:55:26.799949 4791 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" Feb 18 00:55:26 crc kubenswrapper[4791]: I0218 00:55:26.801185 4791 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c9007556fe5d18b5a46e46ece707f7a5089104141aff41585216532707c5ee80"} pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 18 00:55:26 crc kubenswrapper[4791]: I0218 00:55:26.801289 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" containerID="cri-o://c9007556fe5d18b5a46e46ece707f7a5089104141aff41585216532707c5ee80" gracePeriod=600 Feb 18 00:55:27 crc kubenswrapper[4791]: I0218 00:55:27.080826 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="420f17be-f676-4f31-8d07-e206a9313e12" path="/var/lib/kubelet/pods/420f17be-f676-4f31-8d07-e206a9313e12/volumes" Feb 18 00:55:27 crc kubenswrapper[4791]: I0218 00:55:27.317838 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7cb5889db5-v6hzs" Feb 18 00:55:27 crc kubenswrapper[4791]: I0218 00:55:27.417732 4791 generic.go:334] "Generic (PLEG): container finished" podID="0b31a333-8f95-459c-8135-e91e557c4c85" containerID="c9007556fe5d18b5a46e46ece707f7a5089104141aff41585216532707c5ee80" exitCode=0 Feb 18 00:55:27 crc kubenswrapper[4791]: I0218 00:55:27.417876 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerDied","Data":"c9007556fe5d18b5a46e46ece707f7a5089104141aff41585216532707c5ee80"} Feb 18 00:55:27 crc kubenswrapper[4791]: I0218 00:55:27.417905 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerStarted","Data":"9c99f3d26f7a57737b6a7ef3e614fe415cd8c65f3089255fc52b1cd99b1db166"} Feb 18 00:55:27 crc kubenswrapper[4791]: I0218 00:55:27.417921 4791 scope.go:117] "RemoveContainer" containerID="35a1be9cfcdfb0c3c05b26f9c95806278509c3206e9bc4177d7fa8a8a51ad178" Feb 18 00:55:27 crc kubenswrapper[4791]: I0218 00:55:27.420351 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-xlbf7" event={"ID":"ddbbd37f-70d8-40d3-9f79-2c8172c4d589","Type":"ContainerStarted","Data":"d6340c5c3fe5942a192a12ad74fea5a7e1b5a411d859e970a33dc3afcc387293"} Feb 18 00:55:27 crc kubenswrapper[4791]: I0218 00:55:27.426659 4791 generic.go:334] "Generic (PLEG): container finished" podID="8fd1aa64-b534-4775-8271-e15c6d10dc5d" containerID="2264c49289721c7bd3530b73579816924aa6f1ec517e134c7e2acd402bd3eaa6" exitCode=0 Feb 18 00:55:27 crc kubenswrapper[4791]: I0218 00:55:27.426702 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb5889db5-v6hzs" event={"ID":"8fd1aa64-b534-4775-8271-e15c6d10dc5d","Type":"ContainerDied","Data":"2264c49289721c7bd3530b73579816924aa6f1ec517e134c7e2acd402bd3eaa6"} Feb 18 00:55:27 crc kubenswrapper[4791]: I0218 00:55:27.426727 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb5889db5-v6hzs" event={"ID":"8fd1aa64-b534-4775-8271-e15c6d10dc5d","Type":"ContainerDied","Data":"b4373ca1a50f5fa68fd64200c44bd4a32469072ba8168ca3b9d146784af525ae"} Feb 18 00:55:27 crc kubenswrapper[4791]: I0218 00:55:27.426781 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7cb5889db5-v6hzs" Feb 18 00:55:27 crc kubenswrapper[4791]: I0218 00:55:27.448469 4791 scope.go:117] "RemoveContainer" containerID="2264c49289721c7bd3530b73579816924aa6f1ec517e134c7e2acd402bd3eaa6" Feb 18 00:55:27 crc kubenswrapper[4791]: I0218 00:55:27.457323 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8fd1aa64-b534-4775-8271-e15c6d10dc5d-dns-svc\") pod \"8fd1aa64-b534-4775-8271-e15c6d10dc5d\" (UID: \"8fd1aa64-b534-4775-8271-e15c6d10dc5d\") " Feb 18 00:55:27 crc kubenswrapper[4791]: I0218 00:55:27.457539 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p4mzv\" (UniqueName: \"kubernetes.io/projected/8fd1aa64-b534-4775-8271-e15c6d10dc5d-kube-api-access-p4mzv\") pod \"8fd1aa64-b534-4775-8271-e15c6d10dc5d\" (UID: \"8fd1aa64-b534-4775-8271-e15c6d10dc5d\") " Feb 18 00:55:27 crc kubenswrapper[4791]: I0218 00:55:27.457642 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8fd1aa64-b534-4775-8271-e15c6d10dc5d-config\") pod \"8fd1aa64-b534-4775-8271-e15c6d10dc5d\" (UID: \"8fd1aa64-b534-4775-8271-e15c6d10dc5d\") " Feb 18 00:55:27 crc kubenswrapper[4791]: I0218 00:55:27.468917 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8fd1aa64-b534-4775-8271-e15c6d10dc5d-kube-api-access-p4mzv" (OuterVolumeSpecName: "kube-api-access-p4mzv") pod "8fd1aa64-b534-4775-8271-e15c6d10dc5d" (UID: "8fd1aa64-b534-4775-8271-e15c6d10dc5d"). InnerVolumeSpecName "kube-api-access-p4mzv". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:55:27 crc kubenswrapper[4791]: I0218 00:55:27.468990 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-xlbf7" podStartSLOduration=2.199338993 podStartE2EDuration="6.46896791s" podCreationTimestamp="2026-02-18 00:55:21 +0000 UTC" firstStartedPulling="2026-02-18 00:55:22.729151543 +0000 UTC m=+1264.297164713" lastFinishedPulling="2026-02-18 00:55:26.99878046 +0000 UTC m=+1268.566793630" observedRunningTime="2026-02-18 00:55:27.463072866 +0000 UTC m=+1269.031086056" watchObservedRunningTime="2026-02-18 00:55:27.46896791 +0000 UTC m=+1269.036981100" Feb 18 00:55:27 crc kubenswrapper[4791]: I0218 00:55:27.477120 4791 scope.go:117] "RemoveContainer" containerID="8a8746faf0b9e85aa5918c69a896e14622f033a6c3ddd9f7fbc46a025cfeb3d8" Feb 18 00:55:27 crc kubenswrapper[4791]: I0218 00:55:27.503077 4791 scope.go:117] "RemoveContainer" containerID="2264c49289721c7bd3530b73579816924aa6f1ec517e134c7e2acd402bd3eaa6" Feb 18 00:55:27 crc kubenswrapper[4791]: E0218 00:55:27.503609 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2264c49289721c7bd3530b73579816924aa6f1ec517e134c7e2acd402bd3eaa6\": container with ID starting with 2264c49289721c7bd3530b73579816924aa6f1ec517e134c7e2acd402bd3eaa6 not found: ID does not exist" containerID="2264c49289721c7bd3530b73579816924aa6f1ec517e134c7e2acd402bd3eaa6" Feb 18 00:55:27 crc kubenswrapper[4791]: I0218 00:55:27.503646 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2264c49289721c7bd3530b73579816924aa6f1ec517e134c7e2acd402bd3eaa6"} err="failed to get container status \"2264c49289721c7bd3530b73579816924aa6f1ec517e134c7e2acd402bd3eaa6\": rpc error: code 
= NotFound desc = could not find container \"2264c49289721c7bd3530b73579816924aa6f1ec517e134c7e2acd402bd3eaa6\": container with ID starting with 2264c49289721c7bd3530b73579816924aa6f1ec517e134c7e2acd402bd3eaa6 not found: ID does not exist" Feb 18 00:55:27 crc kubenswrapper[4791]: I0218 00:55:27.503690 4791 scope.go:117] "RemoveContainer" containerID="8a8746faf0b9e85aa5918c69a896e14622f033a6c3ddd9f7fbc46a025cfeb3d8" Feb 18 00:55:27 crc kubenswrapper[4791]: E0218 00:55:27.504005 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8a8746faf0b9e85aa5918c69a896e14622f033a6c3ddd9f7fbc46a025cfeb3d8\": container with ID starting with 8a8746faf0b9e85aa5918c69a896e14622f033a6c3ddd9f7fbc46a025cfeb3d8 not found: ID does not exist" containerID="8a8746faf0b9e85aa5918c69a896e14622f033a6c3ddd9f7fbc46a025cfeb3d8" Feb 18 00:55:27 crc kubenswrapper[4791]: I0218 00:55:27.504044 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8a8746faf0b9e85aa5918c69a896e14622f033a6c3ddd9f7fbc46a025cfeb3d8"} err="failed to get container status \"8a8746faf0b9e85aa5918c69a896e14622f033a6c3ddd9f7fbc46a025cfeb3d8\": rpc error: code = NotFound desc = could not find container \"8a8746faf0b9e85aa5918c69a896e14622f033a6c3ddd9f7fbc46a025cfeb3d8\": container with ID starting with 8a8746faf0b9e85aa5918c69a896e14622f033a6c3ddd9f7fbc46a025cfeb3d8 not found: ID does not exist" Feb 18 00:55:27 crc kubenswrapper[4791]: I0218 00:55:27.523594 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-nkd6n"] Feb 18 00:55:27 crc kubenswrapper[4791]: W0218 00:55:27.536191 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb959be4d_fe96_46d6_bf53_c76a3fbd2647.slice/crio-59ada6c47490ebceef6c61f974495c2e7680a276b6eaeb4df277a72139af06f8 WatchSource:0}: Error finding container 59ada6c47490ebceef6c61f974495c2e7680a276b6eaeb4df277a72139af06f8: Status 404 returned error can't find the container with id 59ada6c47490ebceef6c61f974495c2e7680a276b6eaeb4df277a72139af06f8 Feb 18 00:55:27 crc kubenswrapper[4791]: I0218 00:55:27.561422 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-l7hr6"] Feb 18 00:55:27 crc kubenswrapper[4791]: I0218 00:55:27.562101 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8fd1aa64-b534-4775-8271-e15c6d10dc5d-config" (OuterVolumeSpecName: "config") pod "8fd1aa64-b534-4775-8271-e15c6d10dc5d" (UID: "8fd1aa64-b534-4775-8271-e15c6d10dc5d"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:55:27 crc kubenswrapper[4791]: I0218 00:55:27.562673 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8fd1aa64-b534-4775-8271-e15c6d10dc5d-config\") pod \"8fd1aa64-b534-4775-8271-e15c6d10dc5d\" (UID: \"8fd1aa64-b534-4775-8271-e15c6d10dc5d\") " Feb 18 00:55:27 crc kubenswrapper[4791]: W0218 00:55:27.562822 4791 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/8fd1aa64-b534-4775-8271-e15c6d10dc5d/volumes/kubernetes.io~configmap/config Feb 18 00:55:27 crc kubenswrapper[4791]: I0218 00:55:27.562931 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8fd1aa64-b534-4775-8271-e15c6d10dc5d-config" (OuterVolumeSpecName: "config") pod "8fd1aa64-b534-4775-8271-e15c6d10dc5d" (UID: "8fd1aa64-b534-4775-8271-e15c6d10dc5d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:55:27 crc kubenswrapper[4791]: I0218 00:55:27.564676 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p4mzv\" (UniqueName: \"kubernetes.io/projected/8fd1aa64-b534-4775-8271-e15c6d10dc5d-kube-api-access-p4mzv\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:27 crc kubenswrapper[4791]: I0218 00:55:27.564699 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8fd1aa64-b534-4775-8271-e15c6d10dc5d-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:27 crc kubenswrapper[4791]: I0218 00:55:27.566940 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8fd1aa64-b534-4775-8271-e15c6d10dc5d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8fd1aa64-b534-4775-8271-e15c6d10dc5d" (UID: "8fd1aa64-b534-4775-8271-e15c6d10dc5d"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:55:27 crc kubenswrapper[4791]: I0218 00:55:27.666950 4791 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8fd1aa64-b534-4775-8271-e15c6d10dc5d-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:27 crc kubenswrapper[4791]: I0218 00:55:27.738755 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Feb 18 00:55:27 crc kubenswrapper[4791]: I0218 00:55:27.785927 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-v6hzs"] Feb 18 00:55:27 crc kubenswrapper[4791]: I0218 00:55:27.808450 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-v6hzs"] Feb 18 00:55:27 crc kubenswrapper[4791]: I0218 00:55:27.830395 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Feb 18 00:55:27 crc kubenswrapper[4791]: I0218 00:55:27.830434 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Feb 18 00:55:28 crc kubenswrapper[4791]: I0218 00:55:28.443229 4791 generic.go:334] "Generic (PLEG): container finished" podID="6eb117b6-f49c-4bc7-a59e-50c32713d4a2" containerID="6bc92c7ea057ed891b0298a2c0398290482d205d5d0939f82641e19c1b0c2f11" exitCode=0 Feb 18 00:55:28 crc kubenswrapper[4791]: I0218 00:55:28.443290 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-nkd6n" event={"ID":"6eb117b6-f49c-4bc7-a59e-50c32713d4a2","Type":"ContainerDied","Data":"6bc92c7ea057ed891b0298a2c0398290482d205d5d0939f82641e19c1b0c2f11"} Feb 18 00:55:28 crc kubenswrapper[4791]: I0218 00:55:28.443628 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-nkd6n" event={"ID":"6eb117b6-f49c-4bc7-a59e-50c32713d4a2","Type":"ContainerStarted","Data":"1af09ea4d03cefa8b128cf61829af8bb8857663402f579af2f1959f98794aa42"} Feb 18 00:55:28 crc kubenswrapper[4791]: I0218 00:55:28.456737 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"d5580709-c614-4e19-a6c2-58f2ea044e0e","Type":"ContainerStarted","Data":"ddb867d987f8bef1f4ec1f22552073291abd41b3f2c708af4b5a7773efd4d1db"} Feb 18 00:55:28 crc kubenswrapper[4791]: I0218 00:55:28.462044 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-l7hr6" event={"ID":"b959be4d-fe96-46d6-bf53-c76a3fbd2647","Type":"ContainerStarted","Data":"8b8ed466f380ce937e4023a3a6ea44232237fdb180f659d1973c3cec9f3b06d3"} Feb 18 00:55:28 crc kubenswrapper[4791]: I0218 00:55:28.462083 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-l7hr6" event={"ID":"b959be4d-fe96-46d6-bf53-c76a3fbd2647","Type":"ContainerStarted","Data":"59ada6c47490ebceef6c61f974495c2e7680a276b6eaeb4df277a72139af06f8"} Feb 18 00:55:28 crc kubenswrapper[4791]: I0218 00:55:28.500419 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-l7hr6" podStartSLOduration=4.500400221 podStartE2EDuration="4.500400221s" podCreationTimestamp="2026-02-18 00:55:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:55:28.487333444 +0000 UTC m=+1270.055346614" watchObservedRunningTime="2026-02-18 00:55:28.500400221 +0000 UTC m=+1270.068413391" Feb 18 00:55:29 crc 
kubenswrapper[4791]: I0218 00:55:29.059756 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Feb 18 00:55:29 crc kubenswrapper[4791]: I0218 00:55:29.074387 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8fd1aa64-b534-4775-8271-e15c6d10dc5d" path="/var/lib/kubelet/pods/8fd1aa64-b534-4775-8271-e15c6d10dc5d/volumes" Feb 18 00:55:29 crc kubenswrapper[4791]: I0218 00:55:29.157661 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Feb 18 00:55:29 crc kubenswrapper[4791]: I0218 00:55:29.307590 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/569159b6-791b-428c-84c7-5387c17a731b-etc-swift\") pod \"swift-storage-0\" (UID: \"569159b6-791b-428c-84c7-5387c17a731b\") " pod="openstack/swift-storage-0" Feb 18 00:55:29 crc kubenswrapper[4791]: E0218 00:55:29.308073 4791 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Feb 18 00:55:29 crc kubenswrapper[4791]: E0218 00:55:29.308959 4791 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Feb 18 00:55:29 crc kubenswrapper[4791]: E0218 00:55:29.309139 4791 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/569159b6-791b-428c-84c7-5387c17a731b-etc-swift podName:569159b6-791b-428c-84c7-5387c17a731b nodeName:}" failed. No retries permitted until 2026-02-18 00:55:37.309125246 +0000 UTC m=+1278.877138416 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/569159b6-791b-428c-84c7-5387c17a731b-etc-swift") pod "swift-storage-0" (UID: "569159b6-791b-428c-84c7-5387c17a731b") : configmap "swift-ring-files" not found Feb 18 00:55:29 crc kubenswrapper[4791]: I0218 00:55:29.472379 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-nkd6n" event={"ID":"6eb117b6-f49c-4bc7-a59e-50c32713d4a2","Type":"ContainerStarted","Data":"60217095f344c2c2439620205475f6181ec1deeec7efc211b548c920612e7b2c"} Feb 18 00:55:29 crc kubenswrapper[4791]: I0218 00:55:29.472579 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-698758b865-nkd6n" Feb 18 00:55:29 crc kubenswrapper[4791]: I0218 00:55:29.475294 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"d5580709-c614-4e19-a6c2-58f2ea044e0e","Type":"ContainerStarted","Data":"03231e30d1954d4822ec67494ad7260132314356c0746bc2c46385b571b9ec2c"} Feb 18 00:55:29 crc kubenswrapper[4791]: I0218 00:55:29.501314 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-698758b865-nkd6n" podStartSLOduration=5.501295811 podStartE2EDuration="5.501295811s" podCreationTimestamp="2026-02-18 00:55:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:55:29.489769753 +0000 UTC m=+1271.057782943" watchObservedRunningTime="2026-02-18 00:55:29.501295811 +0000 UTC m=+1271.069308981" Feb 18 00:55:30 crc kubenswrapper[4791]: I0218 00:55:30.387952 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-a83e-account-create-update-zjqhp"] Feb 18 00:55:30 crc kubenswrapper[4791]: E0218 00:55:30.389277 4791 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fd1aa64-b534-4775-8271-e15c6d10dc5d" containerName="dnsmasq-dns" Feb 18 00:55:30 crc kubenswrapper[4791]: I0218 00:55:30.389307 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fd1aa64-b534-4775-8271-e15c6d10dc5d" containerName="dnsmasq-dns" Feb 18 00:55:30 crc kubenswrapper[4791]: E0218 00:55:30.389345 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fd1aa64-b534-4775-8271-e15c6d10dc5d" containerName="init" Feb 18 00:55:30 crc kubenswrapper[4791]: I0218 00:55:30.389359 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fd1aa64-b534-4775-8271-e15c6d10dc5d" containerName="init" Feb 18 00:55:30 crc kubenswrapper[4791]: I0218 00:55:30.389707 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="8fd1aa64-b534-4775-8271-e15c6d10dc5d" containerName="dnsmasq-dns" Feb 18 00:55:30 crc kubenswrapper[4791]: I0218 00:55:30.390950 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-a83e-account-create-update-zjqhp" Feb 18 00:55:30 crc kubenswrapper[4791]: I0218 00:55:30.393891 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-openstack-db-secret" Feb 18 00:55:30 crc kubenswrapper[4791]: I0218 00:55:30.399808 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-a83e-account-create-update-zjqhp"] Feb 18 00:55:30 crc kubenswrapper[4791]: I0218 00:55:30.446640 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/075c0f15-e53e-4fb6-a7d4-eccf3a44cdf3-operator-scripts\") pod \"mysqld-exporter-a83e-account-create-update-zjqhp\" (UID: \"075c0f15-e53e-4fb6-a7d4-eccf3a44cdf3\") " pod="openstack/mysqld-exporter-a83e-account-create-update-zjqhp" Feb 18 00:55:30 crc kubenswrapper[4791]: I0218 00:55:30.447220 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-twz4l\" (UniqueName: \"kubernetes.io/projected/075c0f15-e53e-4fb6-a7d4-eccf3a44cdf3-kube-api-access-twz4l\") pod \"mysqld-exporter-a83e-account-create-update-zjqhp\" (UID: \"075c0f15-e53e-4fb6-a7d4-eccf3a44cdf3\") " pod="openstack/mysqld-exporter-a83e-account-create-update-zjqhp" Feb 18 00:55:30 crc kubenswrapper[4791]: I0218 00:55:30.481840 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-qd8vw"] Feb 18 00:55:30 crc kubenswrapper[4791]: I0218 00:55:30.488315 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-qd8vw" Feb 18 00:55:30 crc kubenswrapper[4791]: I0218 00:55:30.499652 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"d5580709-c614-4e19-a6c2-58f2ea044e0e","Type":"ContainerStarted","Data":"920dfcaa8dfdb8e0e6d071a57a82714893b608fc409b84b9dc8f81859ae770be"} Feb 18 00:55:30 crc kubenswrapper[4791]: I0218 00:55:30.500284 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Feb 18 00:55:30 crc kubenswrapper[4791]: I0218 00:55:30.502730 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-qd8vw"] Feb 18 00:55:30 crc kubenswrapper[4791]: I0218 00:55:30.531976 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=5.187325791 podStartE2EDuration="6.531941998s" podCreationTimestamp="2026-02-18 00:55:24 +0000 UTC" firstStartedPulling="2026-02-18 00:55:27.764035565 +0000 UTC m=+1269.332048745" lastFinishedPulling="2026-02-18 00:55:29.108651782 +0000 UTC m=+1270.676664952" observedRunningTime="2026-02-18 00:55:30.528469879 +0000 UTC m=+1272.096483039" watchObservedRunningTime="2026-02-18 00:55:30.531941998 +0000 UTC m=+1272.099955158" Feb 18 00:55:30 crc kubenswrapper[4791]: I0218 00:55:30.550696 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c85e1e81-d34b-45e3-9b7a-f73c712f6ad8-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-qd8vw\" (UID: \"c85e1e81-d34b-45e3-9b7a-f73c712f6ad8\") " pod="openstack/mysqld-exporter-openstack-db-create-qd8vw" Feb 18 00:55:30 crc kubenswrapper[4791]: I0218 00:55:30.550752 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-twz4l\" (UniqueName: \"kubernetes.io/projected/075c0f15-e53e-4fb6-a7d4-eccf3a44cdf3-kube-api-access-twz4l\") pod \"mysqld-exporter-a83e-account-create-update-zjqhp\" (UID: \"075c0f15-e53e-4fb6-a7d4-eccf3a44cdf3\") " pod="openstack/mysqld-exporter-a83e-account-create-update-zjqhp" Feb 18 00:55:30 crc kubenswrapper[4791]: I0218 00:55:30.550832 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/075c0f15-e53e-4fb6-a7d4-eccf3a44cdf3-operator-scripts\") pod \"mysqld-exporter-a83e-account-create-update-zjqhp\" (UID: \"075c0f15-e53e-4fb6-a7d4-eccf3a44cdf3\") " pod="openstack/mysqld-exporter-a83e-account-create-update-zjqhp" Feb 18 00:55:30 crc kubenswrapper[4791]: I0218 00:55:30.550937 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2fhrn\" (UniqueName: \"kubernetes.io/projected/c85e1e81-d34b-45e3-9b7a-f73c712f6ad8-kube-api-access-2fhrn\") pod \"mysqld-exporter-openstack-db-create-qd8vw\" (UID: \"c85e1e81-d34b-45e3-9b7a-f73c712f6ad8\") " pod="openstack/mysqld-exporter-openstack-db-create-qd8vw" Feb 18 00:55:30 crc kubenswrapper[4791]: I0218 00:55:30.552346 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/075c0f15-e53e-4fb6-a7d4-eccf3a44cdf3-operator-scripts\") pod \"mysqld-exporter-a83e-account-create-update-zjqhp\" (UID: \"075c0f15-e53e-4fb6-a7d4-eccf3a44cdf3\") " pod="openstack/mysqld-exporter-a83e-account-create-update-zjqhp" Feb 18 00:55:30 crc 
kubenswrapper[4791]: I0218 00:55:30.578420 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-twz4l\" (UniqueName: \"kubernetes.io/projected/075c0f15-e53e-4fb6-a7d4-eccf3a44cdf3-kube-api-access-twz4l\") pod \"mysqld-exporter-a83e-account-create-update-zjqhp\" (UID: \"075c0f15-e53e-4fb6-a7d4-eccf3a44cdf3\") " pod="openstack/mysqld-exporter-a83e-account-create-update-zjqhp" Feb 18 00:55:30 crc kubenswrapper[4791]: I0218 00:55:30.652516 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2fhrn\" (UniqueName: \"kubernetes.io/projected/c85e1e81-d34b-45e3-9b7a-f73c712f6ad8-kube-api-access-2fhrn\") pod \"mysqld-exporter-openstack-db-create-qd8vw\" (UID: \"c85e1e81-d34b-45e3-9b7a-f73c712f6ad8\") " pod="openstack/mysqld-exporter-openstack-db-create-qd8vw" Feb 18 00:55:30 crc kubenswrapper[4791]: I0218 00:55:30.652669 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c85e1e81-d34b-45e3-9b7a-f73c712f6ad8-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-qd8vw\" (UID: \"c85e1e81-d34b-45e3-9b7a-f73c712f6ad8\") " pod="openstack/mysqld-exporter-openstack-db-create-qd8vw" Feb 18 00:55:30 crc kubenswrapper[4791]: I0218 00:55:30.653806 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c85e1e81-d34b-45e3-9b7a-f73c712f6ad8-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-qd8vw\" (UID: \"c85e1e81-d34b-45e3-9b7a-f73c712f6ad8\") " pod="openstack/mysqld-exporter-openstack-db-create-qd8vw" Feb 18 00:55:30 crc kubenswrapper[4791]: I0218 00:55:30.666983 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2fhrn\" (UniqueName: \"kubernetes.io/projected/c85e1e81-d34b-45e3-9b7a-f73c712f6ad8-kube-api-access-2fhrn\") pod \"mysqld-exporter-openstack-db-create-qd8vw\" (UID: \"c85e1e81-d34b-45e3-9b7a-f73c712f6ad8\") " pod="openstack/mysqld-exporter-openstack-db-create-qd8vw" Feb 18 00:55:30 crc kubenswrapper[4791]: I0218 00:55:30.752654 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-a83e-account-create-update-zjqhp" Feb 18 00:55:30 crc kubenswrapper[4791]: I0218 00:55:30.823137 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-qd8vw" Feb 18 00:55:31 crc kubenswrapper[4791]: I0218 00:55:31.278810 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-a83e-account-create-update-zjqhp"] Feb 18 00:55:31 crc kubenswrapper[4791]: I0218 00:55:31.507305 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-a83e-account-create-update-zjqhp" event={"ID":"075c0f15-e53e-4fb6-a7d4-eccf3a44cdf3","Type":"ContainerStarted","Data":"e766732420e3a75e8343ebc876f61c8760fe56c6df94ac820eb3633b3c423783"} Feb 18 00:55:31 crc kubenswrapper[4791]: I0218 00:55:31.507348 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-a83e-account-create-update-zjqhp" event={"ID":"075c0f15-e53e-4fb6-a7d4-eccf3a44cdf3","Type":"ContainerStarted","Data":"fcccfee3337599d0830f12bf8c1d184663919d15c2473991ffd31911a0999726"} Feb 18 00:55:31 crc kubenswrapper[4791]: I0218 00:55:31.526358 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-a83e-account-create-update-zjqhp" podStartSLOduration=1.5263405570000002 podStartE2EDuration="1.526340557s" podCreationTimestamp="2026-02-18 00:55:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:55:31.521501687 +0000 UTC m=+1273.089514857" watchObservedRunningTime="2026-02-18 00:55:31.526340557 +0000 UTC m=+1273.094353737" Feb 18 00:55:31 crc kubenswrapper[4791]: W0218 00:55:31.629418 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc85e1e81_d34b_45e3_9b7a_f73c712f6ad8.slice/crio-7204ade561ddce2c94c638a69507d52183639e8ac1e9c9d2ad734255fcfccf7b WatchSource:0}: Error finding container 7204ade561ddce2c94c638a69507d52183639e8ac1e9c9d2ad734255fcfccf7b: Status 404 returned error can't find the container with id 7204ade561ddce2c94c638a69507d52183639e8ac1e9c9d2ad734255fcfccf7b Feb 18 00:55:31 crc kubenswrapper[4791]: I0218 00:55:31.632894 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-qd8vw"] Feb 18 00:55:31 crc kubenswrapper[4791]: I0218 00:55:31.986781 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Feb 18 00:55:32 crc kubenswrapper[4791]: I0218 00:55:32.073496 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Feb 18 00:55:32 crc kubenswrapper[4791]: I0218 00:55:32.518490 4791 generic.go:334] "Generic (PLEG): container finished" podID="c85e1e81-d34b-45e3-9b7a-f73c712f6ad8" containerID="959282c7dbbc9460cf812811932dd7f23d015d63c6f808960ebb5d9e0f157d2d" exitCode=0 Feb 18 00:55:32 crc kubenswrapper[4791]: I0218 00:55:32.518580 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-qd8vw" event={"ID":"c85e1e81-d34b-45e3-9b7a-f73c712f6ad8","Type":"ContainerDied","Data":"959282c7dbbc9460cf812811932dd7f23d015d63c6f808960ebb5d9e0f157d2d"} Feb 18 00:55:32 crc kubenswrapper[4791]: I0218 00:55:32.518787 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-qd8vw" event={"ID":"c85e1e81-d34b-45e3-9b7a-f73c712f6ad8","Type":"ContainerStarted","Data":"7204ade561ddce2c94c638a69507d52183639e8ac1e9c9d2ad734255fcfccf7b"} Feb 18 00:55:32 crc 
kubenswrapper[4791]: I0218 00:55:32.520353 4791 generic.go:334] "Generic (PLEG): container finished" podID="075c0f15-e53e-4fb6-a7d4-eccf3a44cdf3" containerID="e766732420e3a75e8343ebc876f61c8760fe56c6df94ac820eb3633b3c423783" exitCode=0 Feb 18 00:55:32 crc kubenswrapper[4791]: I0218 00:55:32.520663 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-a83e-account-create-update-zjqhp" event={"ID":"075c0f15-e53e-4fb6-a7d4-eccf3a44cdf3","Type":"ContainerDied","Data":"e766732420e3a75e8343ebc876f61c8760fe56c6df94ac820eb3633b3c423783"} Feb 18 00:55:34 crc kubenswrapper[4791]: I0218 00:55:34.112117 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-a83e-account-create-update-zjqhp" Feb 18 00:55:34 crc kubenswrapper[4791]: I0218 00:55:34.122233 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-qd8vw" Feb 18 00:55:34 crc kubenswrapper[4791]: I0218 00:55:34.133826 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/075c0f15-e53e-4fb6-a7d4-eccf3a44cdf3-operator-scripts\") pod \"075c0f15-e53e-4fb6-a7d4-eccf3a44cdf3\" (UID: \"075c0f15-e53e-4fb6-a7d4-eccf3a44cdf3\") " Feb 18 00:55:34 crc kubenswrapper[4791]: I0218 00:55:34.134084 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-twz4l\" (UniqueName: \"kubernetes.io/projected/075c0f15-e53e-4fb6-a7d4-eccf3a44cdf3-kube-api-access-twz4l\") pod \"075c0f15-e53e-4fb6-a7d4-eccf3a44cdf3\" (UID: \"075c0f15-e53e-4fb6-a7d4-eccf3a44cdf3\") " Feb 18 00:55:34 crc kubenswrapper[4791]: I0218 00:55:34.135253 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/075c0f15-e53e-4fb6-a7d4-eccf3a44cdf3-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "075c0f15-e53e-4fb6-a7d4-eccf3a44cdf3" (UID: "075c0f15-e53e-4fb6-a7d4-eccf3a44cdf3"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:55:34 crc kubenswrapper[4791]: I0218 00:55:34.142421 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/075c0f15-e53e-4fb6-a7d4-eccf3a44cdf3-kube-api-access-twz4l" (OuterVolumeSpecName: "kube-api-access-twz4l") pod "075c0f15-e53e-4fb6-a7d4-eccf3a44cdf3" (UID: "075c0f15-e53e-4fb6-a7d4-eccf3a44cdf3"). InnerVolumeSpecName "kube-api-access-twz4l". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:55:34 crc kubenswrapper[4791]: I0218 00:55:34.235267 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2fhrn\" (UniqueName: \"kubernetes.io/projected/c85e1e81-d34b-45e3-9b7a-f73c712f6ad8-kube-api-access-2fhrn\") pod \"c85e1e81-d34b-45e3-9b7a-f73c712f6ad8\" (UID: \"c85e1e81-d34b-45e3-9b7a-f73c712f6ad8\") " Feb 18 00:55:34 crc kubenswrapper[4791]: I0218 00:55:34.235573 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c85e1e81-d34b-45e3-9b7a-f73c712f6ad8-operator-scripts\") pod \"c85e1e81-d34b-45e3-9b7a-f73c712f6ad8\" (UID: \"c85e1e81-d34b-45e3-9b7a-f73c712f6ad8\") " Feb 18 00:55:34 crc kubenswrapper[4791]: I0218 00:55:34.236225 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-twz4l\" (UniqueName: \"kubernetes.io/projected/075c0f15-e53e-4fb6-a7d4-eccf3a44cdf3-kube-api-access-twz4l\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:34 crc kubenswrapper[4791]: I0218 00:55:34.236249 4791 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/075c0f15-e53e-4fb6-a7d4-eccf3a44cdf3-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:34 crc kubenswrapper[4791]: I0218 00:55:34.237715 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c85e1e81-d34b-45e3-9b7a-f73c712f6ad8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c85e1e81-d34b-45e3-9b7a-f73c712f6ad8" (UID: "c85e1e81-d34b-45e3-9b7a-f73c712f6ad8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:55:34 crc kubenswrapper[4791]: I0218 00:55:34.238079 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c85e1e81-d34b-45e3-9b7a-f73c712f6ad8-kube-api-access-2fhrn" (OuterVolumeSpecName: "kube-api-access-2fhrn") pod "c85e1e81-d34b-45e3-9b7a-f73c712f6ad8" (UID: "c85e1e81-d34b-45e3-9b7a-f73c712f6ad8"). InnerVolumeSpecName "kube-api-access-2fhrn". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:55:34 crc kubenswrapper[4791]: I0218 00:55:34.338022 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2fhrn\" (UniqueName: \"kubernetes.io/projected/c85e1e81-d34b-45e3-9b7a-f73c712f6ad8-kube-api-access-2fhrn\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:34 crc kubenswrapper[4791]: I0218 00:55:34.338062 4791 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c85e1e81-d34b-45e3-9b7a-f73c712f6ad8-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:34 crc kubenswrapper[4791]: I0218 00:55:34.540133 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-a83e-account-create-update-zjqhp" event={"ID":"075c0f15-e53e-4fb6-a7d4-eccf3a44cdf3","Type":"ContainerDied","Data":"fcccfee3337599d0830f12bf8c1d184663919d15c2473991ffd31911a0999726"} Feb 18 00:55:34 crc kubenswrapper[4791]: I0218 00:55:34.540194 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fcccfee3337599d0830f12bf8c1d184663919d15c2473991ffd31911a0999726" Feb 18 00:55:34 crc kubenswrapper[4791]: I0218 00:55:34.540243 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-a83e-account-create-update-zjqhp" Feb 18 00:55:34 crc kubenswrapper[4791]: I0218 00:55:34.541619 4791 generic.go:334] "Generic (PLEG): container finished" podID="ddbbd37f-70d8-40d3-9f79-2c8172c4d589" containerID="d6340c5c3fe5942a192a12ad74fea5a7e1b5a411d859e970a33dc3afcc387293" exitCode=0 Feb 18 00:55:34 crc kubenswrapper[4791]: I0218 00:55:34.541841 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-xlbf7" event={"ID":"ddbbd37f-70d8-40d3-9f79-2c8172c4d589","Type":"ContainerDied","Data":"d6340c5c3fe5942a192a12ad74fea5a7e1b5a411d859e970a33dc3afcc387293"} Feb 18 00:55:34 crc kubenswrapper[4791]: I0218 00:55:34.542838 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-qd8vw" event={"ID":"c85e1e81-d34b-45e3-9b7a-f73c712f6ad8","Type":"ContainerDied","Data":"7204ade561ddce2c94c638a69507d52183639e8ac1e9c9d2ad734255fcfccf7b"} Feb 18 00:55:34 crc kubenswrapper[4791]: I0218 00:55:34.542955 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7204ade561ddce2c94c638a69507d52183639e8ac1e9c9d2ad734255fcfccf7b" Feb 18 00:55:34 crc kubenswrapper[4791]: I0218 00:55:34.542878 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-qd8vw" Feb 18 00:55:34 crc kubenswrapper[4791]: I0218 00:55:34.842755 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-k2mq5"] Feb 18 00:55:34 crc kubenswrapper[4791]: E0218 00:55:34.843172 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c85e1e81-d34b-45e3-9b7a-f73c712f6ad8" containerName="mariadb-database-create" Feb 18 00:55:34 crc kubenswrapper[4791]: I0218 00:55:34.843188 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="c85e1e81-d34b-45e3-9b7a-f73c712f6ad8" containerName="mariadb-database-create" Feb 18 00:55:34 crc kubenswrapper[4791]: E0218 00:55:34.843212 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="075c0f15-e53e-4fb6-a7d4-eccf3a44cdf3" containerName="mariadb-account-create-update" Feb 18 00:55:34 crc kubenswrapper[4791]: I0218 00:55:34.843218 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="075c0f15-e53e-4fb6-a7d4-eccf3a44cdf3" containerName="mariadb-account-create-update" Feb 18 00:55:34 crc kubenswrapper[4791]: I0218 00:55:34.843426 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="075c0f15-e53e-4fb6-a7d4-eccf3a44cdf3" containerName="mariadb-account-create-update" Feb 18 00:55:34 crc kubenswrapper[4791]: I0218 00:55:34.843450 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="c85e1e81-d34b-45e3-9b7a-f73c712f6ad8" containerName="mariadb-database-create" Feb 18 00:55:34 crc kubenswrapper[4791]: I0218 00:55:34.844143 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-k2mq5" Feb 18 00:55:34 crc kubenswrapper[4791]: I0218 00:55:34.848313 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Feb 18 00:55:34 crc kubenswrapper[4791]: I0218 00:55:34.856672 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-k2mq5"] Feb 18 00:55:34 crc kubenswrapper[4791]: I0218 00:55:34.951668 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f09f99ff-9d2b-48c9-96d3-5b39bee97442-operator-scripts\") pod \"root-account-create-update-k2mq5\" (UID: \"f09f99ff-9d2b-48c9-96d3-5b39bee97442\") " pod="openstack/root-account-create-update-k2mq5" Feb 18 00:55:34 crc kubenswrapper[4791]: I0218 00:55:34.951717 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pksdn\" (UniqueName: \"kubernetes.io/projected/f09f99ff-9d2b-48c9-96d3-5b39bee97442-kube-api-access-pksdn\") pod \"root-account-create-update-k2mq5\" (UID: \"f09f99ff-9d2b-48c9-96d3-5b39bee97442\") " pod="openstack/root-account-create-update-k2mq5" Feb 18 00:55:35 crc kubenswrapper[4791]: I0218 00:55:35.053962 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f09f99ff-9d2b-48c9-96d3-5b39bee97442-operator-scripts\") pod \"root-account-create-update-k2mq5\" (UID: \"f09f99ff-9d2b-48c9-96d3-5b39bee97442\") " pod="openstack/root-account-create-update-k2mq5" Feb 18 00:55:35 crc kubenswrapper[4791]: I0218 00:55:35.054003 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pksdn\" (UniqueName: \"kubernetes.io/projected/f09f99ff-9d2b-48c9-96d3-5b39bee97442-kube-api-access-pksdn\") pod \"root-account-create-update-k2mq5\" (UID: \"f09f99ff-9d2b-48c9-96d3-5b39bee97442\") " pod="openstack/root-account-create-update-k2mq5" Feb 18 00:55:35 crc kubenswrapper[4791]: I0218 00:55:35.055903 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f09f99ff-9d2b-48c9-96d3-5b39bee97442-operator-scripts\") pod \"root-account-create-update-k2mq5\" (UID: \"f09f99ff-9d2b-48c9-96d3-5b39bee97442\") " pod="openstack/root-account-create-update-k2mq5" Feb 18 00:55:35 crc kubenswrapper[4791]: I0218 00:55:35.069549 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pksdn\" (UniqueName: \"kubernetes.io/projected/f09f99ff-9d2b-48c9-96d3-5b39bee97442-kube-api-access-pksdn\") pod \"root-account-create-update-k2mq5\" (UID: \"f09f99ff-9d2b-48c9-96d3-5b39bee97442\") " pod="openstack/root-account-create-update-k2mq5" Feb 18 00:55:35 crc kubenswrapper[4791]: I0218 00:55:35.205276 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-k2mq5" Feb 18 00:55:35 crc kubenswrapper[4791]: I0218 00:55:35.228636 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-698758b865-nkd6n" Feb 18 00:55:35 crc kubenswrapper[4791]: I0218 00:55:35.306765 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-2nbhb"] Feb 18 00:55:35 crc kubenswrapper[4791]: I0218 00:55:35.307034 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57d769cc4f-2nbhb" podUID="864417f3-df49-4f21-b1a7-8895951b7914" containerName="dnsmasq-dns" containerID="cri-o://722be72ca704049c042c7541c41c34b4d5e9ab28c0219f63370e376e97c80e55" gracePeriod=10 Feb 18 00:55:35 crc kubenswrapper[4791]: I0218 00:55:35.553742 4791 generic.go:334] "Generic (PLEG): container finished" podID="864417f3-df49-4f21-b1a7-8895951b7914" containerID="722be72ca704049c042c7541c41c34b4d5e9ab28c0219f63370e376e97c80e55" exitCode=0 Feb 18 00:55:35 crc kubenswrapper[4791]: I0218 00:55:35.553827 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-2nbhb" event={"ID":"864417f3-df49-4f21-b1a7-8895951b7914","Type":"ContainerDied","Data":"722be72ca704049c042c7541c41c34b4d5e9ab28c0219f63370e376e97c80e55"} Feb 18 00:55:35 crc kubenswrapper[4791]: I0218 00:55:35.732987 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-nw6ql"] Feb 18 00:55:35 crc kubenswrapper[4791]: I0218 00:55:35.734509 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-nw6ql" Feb 18 00:55:35 crc kubenswrapper[4791]: I0218 00:55:35.749829 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-nw6ql"] Feb 18 00:55:35 crc kubenswrapper[4791]: I0218 00:55:35.770226 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-k2mq5"] Feb 18 00:55:35 crc kubenswrapper[4791]: I0218 00:55:35.778975 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cnpdn\" (UniqueName: \"kubernetes.io/projected/ba41ccfb-9ea8-47e2-a814-d958e7ced77f-kube-api-access-cnpdn\") pod \"mysqld-exporter-openstack-cell1-db-create-nw6ql\" (UID: \"ba41ccfb-9ea8-47e2-a814-d958e7ced77f\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-nw6ql" Feb 18 00:55:35 crc kubenswrapper[4791]: I0218 00:55:35.779088 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ba41ccfb-9ea8-47e2-a814-d958e7ced77f-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-nw6ql\" (UID: \"ba41ccfb-9ea8-47e2-a814-d958e7ced77f\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-nw6ql" Feb 18 00:55:35 crc kubenswrapper[4791]: I0218 00:55:35.881289 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cnpdn\" (UniqueName: \"kubernetes.io/projected/ba41ccfb-9ea8-47e2-a814-d958e7ced77f-kube-api-access-cnpdn\") pod \"mysqld-exporter-openstack-cell1-db-create-nw6ql\" (UID: \"ba41ccfb-9ea8-47e2-a814-d958e7ced77f\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-nw6ql" Feb 18 00:55:35 crc kubenswrapper[4791]: I0218 00:55:35.881345 4791 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ba41ccfb-9ea8-47e2-a814-d958e7ced77f-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-nw6ql\" (UID: \"ba41ccfb-9ea8-47e2-a814-d958e7ced77f\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-nw6ql" Feb 18 00:55:35 crc kubenswrapper[4791]: I0218 00:55:35.882633 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ba41ccfb-9ea8-47e2-a814-d958e7ced77f-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-nw6ql\" (UID: \"ba41ccfb-9ea8-47e2-a814-d958e7ced77f\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-nw6ql" Feb 18 00:55:35 crc kubenswrapper[4791]: I0218 00:55:35.910881 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cnpdn\" (UniqueName: \"kubernetes.io/projected/ba41ccfb-9ea8-47e2-a814-d958e7ced77f-kube-api-access-cnpdn\") pod \"mysqld-exporter-openstack-cell1-db-create-nw6ql\" (UID: \"ba41ccfb-9ea8-47e2-a814-d958e7ced77f\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-nw6ql" Feb 18 00:55:35 crc kubenswrapper[4791]: I0218 00:55:35.955650 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-cae6-account-create-update-jthwt"] Feb 18 00:55:35 crc kubenswrapper[4791]: I0218 00:55:35.957405 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-cae6-account-create-update-jthwt" Feb 18 00:55:35 crc kubenswrapper[4791]: I0218 00:55:35.963086 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-openstack-cell1-db-secret" Feb 18 00:55:35 crc kubenswrapper[4791]: I0218 00:55:35.974583 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-cae6-account-create-update-jthwt"] Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.048752 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-2nbhb" Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.049496 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-xlbf7" Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.105812 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-dispersionconf\") pod \"ddbbd37f-70d8-40d3-9f79-2c8172c4d589\" (UID: \"ddbbd37f-70d8-40d3-9f79-2c8172c4d589\") " Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.105935 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-etc-swift\") pod \"ddbbd37f-70d8-40d3-9f79-2c8172c4d589\" (UID: \"ddbbd37f-70d8-40d3-9f79-2c8172c4d589\") " Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.105961 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-ring-data-devices\") pod \"ddbbd37f-70d8-40d3-9f79-2c8172c4d589\" (UID: \"ddbbd37f-70d8-40d3-9f79-2c8172c4d589\") " Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.106000 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/864417f3-df49-4f21-b1a7-8895951b7914-config\") pod \"864417f3-df49-4f21-b1a7-8895951b7914\" (UID: \"864417f3-df49-4f21-b1a7-8895951b7914\") " Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.106089 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/864417f3-df49-4f21-b1a7-8895951b7914-dns-svc\") pod \"864417f3-df49-4f21-b1a7-8895951b7914\" (UID: \"864417f3-df49-4f21-b1a7-8895951b7914\") " Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.106110 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-scripts\") pod \"ddbbd37f-70d8-40d3-9f79-2c8172c4d589\" (UID: \"ddbbd37f-70d8-40d3-9f79-2c8172c4d589\") " Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.106145 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-combined-ca-bundle\") pod \"ddbbd37f-70d8-40d3-9f79-2c8172c4d589\" (UID: \"ddbbd37f-70d8-40d3-9f79-2c8172c4d589\") " Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.106203 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-swiftconf\") pod \"ddbbd37f-70d8-40d3-9f79-2c8172c4d589\" (UID: \"ddbbd37f-70d8-40d3-9f79-2c8172c4d589\") " Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.106232 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bm99b\" (UniqueName: \"kubernetes.io/projected/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-kube-api-access-bm99b\") pod \"ddbbd37f-70d8-40d3-9f79-2c8172c4d589\" (UID: \"ddbbd37f-70d8-40d3-9f79-2c8172c4d589\") " Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.106309 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vmvcl\" (UniqueName: \"kubernetes.io/projected/864417f3-df49-4f21-b1a7-8895951b7914-kube-api-access-vmvcl\") pod \"864417f3-df49-4f21-b1a7-8895951b7914\" (UID: 
\"864417f3-df49-4f21-b1a7-8895951b7914\") " Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.106654 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2rnrv\" (UniqueName: \"kubernetes.io/projected/3c66a47b-ef25-4487-b69c-1a7c7454631d-kube-api-access-2rnrv\") pod \"mysqld-exporter-cae6-account-create-update-jthwt\" (UID: \"3c66a47b-ef25-4487-b69c-1a7c7454631d\") " pod="openstack/mysqld-exporter-cae6-account-create-update-jthwt" Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.106766 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c66a47b-ef25-4487-b69c-1a7c7454631d-operator-scripts\") pod \"mysqld-exporter-cae6-account-create-update-jthwt\" (UID: \"3c66a47b-ef25-4487-b69c-1a7c7454631d\") " pod="openstack/mysqld-exporter-cae6-account-create-update-jthwt" Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.108002 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "ddbbd37f-70d8-40d3-9f79-2c8172c4d589" (UID: "ddbbd37f-70d8-40d3-9f79-2c8172c4d589"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.110149 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "ddbbd37f-70d8-40d3-9f79-2c8172c4d589" (UID: "ddbbd37f-70d8-40d3-9f79-2c8172c4d589"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.118390 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/864417f3-df49-4f21-b1a7-8895951b7914-kube-api-access-vmvcl" (OuterVolumeSpecName: "kube-api-access-vmvcl") pod "864417f3-df49-4f21-b1a7-8895951b7914" (UID: "864417f3-df49-4f21-b1a7-8895951b7914"). InnerVolumeSpecName "kube-api-access-vmvcl". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.122698 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-kube-api-access-bm99b" (OuterVolumeSpecName: "kube-api-access-bm99b") pod "ddbbd37f-70d8-40d3-9f79-2c8172c4d589" (UID: "ddbbd37f-70d8-40d3-9f79-2c8172c4d589"). InnerVolumeSpecName "kube-api-access-bm99b". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.149889 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "ddbbd37f-70d8-40d3-9f79-2c8172c4d589" (UID: "ddbbd37f-70d8-40d3-9f79-2c8172c4d589"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.152622 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "ddbbd37f-70d8-40d3-9f79-2c8172c4d589" (UID: "ddbbd37f-70d8-40d3-9f79-2c8172c4d589"). 
InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.172543 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-nw6ql" Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.175493 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-scripts" (OuterVolumeSpecName: "scripts") pod "ddbbd37f-70d8-40d3-9f79-2c8172c4d589" (UID: "ddbbd37f-70d8-40d3-9f79-2c8172c4d589"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.188697 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/864417f3-df49-4f21-b1a7-8895951b7914-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "864417f3-df49-4f21-b1a7-8895951b7914" (UID: "864417f3-df49-4f21-b1a7-8895951b7914"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.194855 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ddbbd37f-70d8-40d3-9f79-2c8172c4d589" (UID: "ddbbd37f-70d8-40d3-9f79-2c8172c4d589"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.208354 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c66a47b-ef25-4487-b69c-1a7c7454631d-operator-scripts\") pod \"mysqld-exporter-cae6-account-create-update-jthwt\" (UID: \"3c66a47b-ef25-4487-b69c-1a7c7454631d\") " pod="openstack/mysqld-exporter-cae6-account-create-update-jthwt" Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.208526 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2rnrv\" (UniqueName: \"kubernetes.io/projected/3c66a47b-ef25-4487-b69c-1a7c7454631d-kube-api-access-2rnrv\") pod \"mysqld-exporter-cae6-account-create-update-jthwt\" (UID: \"3c66a47b-ef25-4487-b69c-1a7c7454631d\") " pod="openstack/mysqld-exporter-cae6-account-create-update-jthwt" Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.208604 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bm99b\" (UniqueName: \"kubernetes.io/projected/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-kube-api-access-bm99b\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.208617 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vmvcl\" (UniqueName: \"kubernetes.io/projected/864417f3-df49-4f21-b1a7-8895951b7914-kube-api-access-vmvcl\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.208626 4791 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-dispersionconf\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.208635 4791 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-etc-swift\") on node \"crc\" DevicePath \"\"" Feb 18 
00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.208643 4791 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-ring-data-devices\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.208651 4791 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/864417f3-df49-4f21-b1a7-8895951b7914-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.208659 4791 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.208668 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.208676 4791 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/ddbbd37f-70d8-40d3-9f79-2c8172c4d589-swiftconf\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.209518 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c66a47b-ef25-4487-b69c-1a7c7454631d-operator-scripts\") pod \"mysqld-exporter-cae6-account-create-update-jthwt\" (UID: \"3c66a47b-ef25-4487-b69c-1a7c7454631d\") " pod="openstack/mysqld-exporter-cae6-account-create-update-jthwt" Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.214512 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/864417f3-df49-4f21-b1a7-8895951b7914-config" (OuterVolumeSpecName: "config") pod "864417f3-df49-4f21-b1a7-8895951b7914" (UID: "864417f3-df49-4f21-b1a7-8895951b7914"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.236631 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2rnrv\" (UniqueName: \"kubernetes.io/projected/3c66a47b-ef25-4487-b69c-1a7c7454631d-kube-api-access-2rnrv\") pod \"mysqld-exporter-cae6-account-create-update-jthwt\" (UID: \"3c66a47b-ef25-4487-b69c-1a7c7454631d\") " pod="openstack/mysqld-exporter-cae6-account-create-update-jthwt" Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.310753 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/864417f3-df49-4f21-b1a7-8895951b7914-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.320879 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-cae6-account-create-update-jthwt" Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.566261 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-xlbf7" Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.566274 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-xlbf7" event={"ID":"ddbbd37f-70d8-40d3-9f79-2c8172c4d589","Type":"ContainerDied","Data":"3d03b6c3c7ac56e6a4c24d30c26534f230e745ed40d6286c3f0b5c3e9649faed"} Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.566628 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3d03b6c3c7ac56e6a4c24d30c26534f230e745ed40d6286c3f0b5c3e9649faed" Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.568615 4791 generic.go:334] "Generic (PLEG): container finished" podID="519e8bd0-f30e-4ff2-be43-b33764a95351" containerID="ba43b1dd0a2b84c56947bbfb98f6d09a648684bfbeacf33f245015081ac2734e" exitCode=0 Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.568685 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"519e8bd0-f30e-4ff2-be43-b33764a95351","Type":"ContainerDied","Data":"ba43b1dd0a2b84c56947bbfb98f6d09a648684bfbeacf33f245015081ac2734e"} Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.574391 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-2nbhb" Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.574468 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-2nbhb" event={"ID":"864417f3-df49-4f21-b1a7-8895951b7914","Type":"ContainerDied","Data":"1c27facc4838726eccc6855f42697262954be1544f233e87d857b6bfdcb8a5a1"} Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.574542 4791 scope.go:117] "RemoveContainer" containerID="722be72ca704049c042c7541c41c34b4d5e9ab28c0219f63370e376e97c80e55" Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.579076 4791 generic.go:334] "Generic (PLEG): container finished" podID="10fd526e-f41c-4c4a-8e15-239cd3ac37da" containerID="583912c29332d6d7363a4c367b463077382969b5fb9887301f48fb4d2083c4d4" exitCode=0 Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.579196 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"10fd526e-f41c-4c4a-8e15-239cd3ac37da","Type":"ContainerDied","Data":"583912c29332d6d7363a4c367b463077382969b5fb9887301f48fb4d2083c4d4"} Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.584791 4791 generic.go:334] "Generic (PLEG): container finished" podID="f09f99ff-9d2b-48c9-96d3-5b39bee97442" containerID="5367edaf1f83dc270c9d689e0ceaf3e267f32356c387e9259f2b46e5292ccedc" exitCode=0 Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.584828 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-k2mq5" event={"ID":"f09f99ff-9d2b-48c9-96d3-5b39bee97442","Type":"ContainerDied","Data":"5367edaf1f83dc270c9d689e0ceaf3e267f32356c387e9259f2b46e5292ccedc"} Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.584854 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-k2mq5" event={"ID":"f09f99ff-9d2b-48c9-96d3-5b39bee97442","Type":"ContainerStarted","Data":"6f9274f4739974401e0de29dd7074a3a77151843531b770c57704c7bebba8591"} Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.614752 4791 scope.go:117] "RemoveContainer" containerID="0c6df0e82cab763400d75f36a537b7cf35cb345e0c08a19ed5352226b7fb37fb" Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.680380 4791 kubelet.go:2437] 
"SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-2nbhb"] Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.690351 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-2nbhb"] Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.699919 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-nw6ql"] Feb 18 00:55:36 crc kubenswrapper[4791]: W0218 00:55:36.714538 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podba41ccfb_9ea8_47e2_a814_d958e7ced77f.slice/crio-0af1bbdd36ef48f341f16d8874679894e4f9b3d1fba208b0b1751934e3e4326e WatchSource:0}: Error finding container 0af1bbdd36ef48f341f16d8874679894e4f9b3d1fba208b0b1751934e3e4326e: Status 404 returned error can't find the container with id 0af1bbdd36ef48f341f16d8874679894e4f9b3d1fba208b0b1751934e3e4326e Feb 18 00:55:36 crc kubenswrapper[4791]: I0218 00:55:36.822444 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-cae6-account-create-update-jthwt"] Feb 18 00:55:37 crc kubenswrapper[4791]: I0218 00:55:37.099031 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="864417f3-df49-4f21-b1a7-8895951b7914" path="/var/lib/kubelet/pods/864417f3-df49-4f21-b1a7-8895951b7914/volumes" Feb 18 00:55:37 crc kubenswrapper[4791]: I0218 00:55:37.241087 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-776c7c9864-7v9mt" podUID="a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0" containerName="console" containerID="cri-o://f198e41ac37ae308322af87d21db9d60e1bd1aa3723a98ee586d6f66d01b3b26" gracePeriod=15 Feb 18 00:55:37 crc kubenswrapper[4791]: I0218 00:55:37.355123 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/569159b6-791b-428c-84c7-5387c17a731b-etc-swift\") pod \"swift-storage-0\" (UID: \"569159b6-791b-428c-84c7-5387c17a731b\") " pod="openstack/swift-storage-0" Feb 18 00:55:37 crc kubenswrapper[4791]: I0218 00:55:37.363636 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/569159b6-791b-428c-84c7-5387c17a731b-etc-swift\") pod \"swift-storage-0\" (UID: \"569159b6-791b-428c-84c7-5387c17a731b\") " pod="openstack/swift-storage-0" Feb 18 00:55:37 crc kubenswrapper[4791]: I0218 00:55:37.424247 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Feb 18 00:55:37 crc kubenswrapper[4791]: I0218 00:55:37.616300 4791 generic.go:334] "Generic (PLEG): container finished" podID="ba41ccfb-9ea8-47e2-a814-d958e7ced77f" containerID="506bcfed16f68d1ac4430dbc9253c86aecb1a8036040ab4c28f87ce750c095c4" exitCode=0 Feb 18 00:55:37 crc kubenswrapper[4791]: I0218 00:55:37.617574 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-nw6ql" event={"ID":"ba41ccfb-9ea8-47e2-a814-d958e7ced77f","Type":"ContainerDied","Data":"506bcfed16f68d1ac4430dbc9253c86aecb1a8036040ab4c28f87ce750c095c4"} Feb 18 00:55:37 crc kubenswrapper[4791]: I0218 00:55:37.617606 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-nw6ql" event={"ID":"ba41ccfb-9ea8-47e2-a814-d958e7ced77f","Type":"ContainerStarted","Data":"0af1bbdd36ef48f341f16d8874679894e4f9b3d1fba208b0b1751934e3e4326e"} Feb 18 00:55:37 crc kubenswrapper[4791]: I0218 00:55:37.628767 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"10fd526e-f41c-4c4a-8e15-239cd3ac37da","Type":"ContainerStarted","Data":"87dbe269e6976b92cf76cf29ccc6023b76915423a9f99644c462783a776b728f"} Feb 18 00:55:37 crc kubenswrapper[4791]: I0218 00:55:37.629747 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Feb 18 00:55:37 crc kubenswrapper[4791]: I0218 00:55:37.647110 4791 generic.go:334] "Generic (PLEG): container finished" podID="4bb5fcf5-6cd5-4569-b788-5740edee3793" containerID="8fc686486368da1e0d773d98fc965789987967d981714709a5bcb76c4a7714ad" exitCode=0 Feb 18 00:55:37 crc kubenswrapper[4791]: I0218 00:55:37.647204 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"4bb5fcf5-6cd5-4569-b788-5740edee3793","Type":"ContainerDied","Data":"8fc686486368da1e0d773d98fc965789987967d981714709a5bcb76c4a7714ad"} Feb 18 00:55:37 crc kubenswrapper[4791]: I0218 00:55:37.654703 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-776c7c9864-7v9mt_a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0/console/0.log" Feb 18 00:55:37 crc kubenswrapper[4791]: I0218 00:55:37.654749 4791 generic.go:334] "Generic (PLEG): container finished" podID="a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0" containerID="f198e41ac37ae308322af87d21db9d60e1bd1aa3723a98ee586d6f66d01b3b26" exitCode=2 Feb 18 00:55:37 crc kubenswrapper[4791]: I0218 00:55:37.654826 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-776c7c9864-7v9mt" event={"ID":"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0","Type":"ContainerDied","Data":"f198e41ac37ae308322af87d21db9d60e1bd1aa3723a98ee586d6f66d01b3b26"} Feb 18 00:55:37 crc kubenswrapper[4791]: I0218 00:55:37.665868 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-cae6-account-create-update-jthwt" event={"ID":"3c66a47b-ef25-4487-b69c-1a7c7454631d","Type":"ContainerStarted","Data":"57f261c0206fe61ff589d5df0dcb9ea8dd3870246717bbcac307b2d9d1cde62f"} Feb 18 00:55:37 crc kubenswrapper[4791]: I0218 00:55:37.665913 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-cae6-account-create-update-jthwt" event={"ID":"3c66a47b-ef25-4487-b69c-1a7c7454631d","Type":"ContainerStarted","Data":"9de93ddf07dd254d7ffe143159c021e5a8e154803c4440ab298c905d165040bc"} Feb 18 00:55:37 crc kubenswrapper[4791]: I0218 00:55:37.671080 4791 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"519e8bd0-f30e-4ff2-be43-b33764a95351","Type":"ContainerStarted","Data":"90f76616b352a9e3808f336ce236d176ba8f78c053809552f9a2bd12a6eac551"} Feb 18 00:55:37 crc kubenswrapper[4791]: I0218 00:55:37.672170 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-1" Feb 18 00:55:37 crc kubenswrapper[4791]: I0218 00:55:37.685749 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=43.225670382 podStartE2EDuration="54.685733393s" podCreationTimestamp="2026-02-18 00:54:43 +0000 UTC" firstStartedPulling="2026-02-18 00:54:50.680293668 +0000 UTC m=+1232.248306838" lastFinishedPulling="2026-02-18 00:55:02.140356679 +0000 UTC m=+1243.708369849" observedRunningTime="2026-02-18 00:55:37.678788397 +0000 UTC m=+1279.246801567" watchObservedRunningTime="2026-02-18 00:55:37.685733393 +0000 UTC m=+1279.253746563" Feb 18 00:55:37 crc kubenswrapper[4791]: I0218 00:55:37.712685 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-776c7c9864-7v9mt_a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0/console/0.log" Feb 18 00:55:37 crc kubenswrapper[4791]: I0218 00:55:37.712741 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-776c7c9864-7v9mt" Feb 18 00:55:37 crc kubenswrapper[4791]: I0218 00:55:37.724710 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-1" podStartSLOduration=53.561948031 podStartE2EDuration="54.724696094s" podCreationTimestamp="2026-02-18 00:54:43 +0000 UTC" firstStartedPulling="2026-02-18 00:55:01.011133008 +0000 UTC m=+1242.579146178" lastFinishedPulling="2026-02-18 00:55:02.173881071 +0000 UTC m=+1243.741894241" observedRunningTime="2026-02-18 00:55:37.717673186 +0000 UTC m=+1279.285686356" watchObservedRunningTime="2026-02-18 00:55:37.724696094 +0000 UTC m=+1279.292709264" Feb 18 00:55:37 crc kubenswrapper[4791]: I0218 00:55:37.821596 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-cae6-account-create-update-jthwt" podStartSLOduration=2.821578836 podStartE2EDuration="2.821578836s" podCreationTimestamp="2026-02-18 00:55:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:55:37.813985671 +0000 UTC m=+1279.381998841" watchObservedRunningTime="2026-02-18 00:55:37.821578836 +0000 UTC m=+1279.389592006" Feb 18 00:55:37 crc kubenswrapper[4791]: I0218 00:55:37.866913 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-service-ca\") pod \"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0\" (UID: \"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0\") " Feb 18 00:55:37 crc kubenswrapper[4791]: I0218 00:55:37.866967 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-oauth-serving-cert\") pod \"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0\" (UID: \"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0\") " Feb 18 00:55:37 crc kubenswrapper[4791]: I0218 00:55:37.867056 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: 
\"kubernetes.io/configmap/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-console-config\") pod \"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0\" (UID: \"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0\") " Feb 18 00:55:37 crc kubenswrapper[4791]: I0218 00:55:37.867138 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-console-oauth-config\") pod \"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0\" (UID: \"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0\") " Feb 18 00:55:37 crc kubenswrapper[4791]: I0218 00:55:37.867196 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-console-serving-cert\") pod \"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0\" (UID: \"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0\") " Feb 18 00:55:37 crc kubenswrapper[4791]: I0218 00:55:37.867235 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cd82k\" (UniqueName: \"kubernetes.io/projected/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-kube-api-access-cd82k\") pod \"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0\" (UID: \"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0\") " Feb 18 00:55:37 crc kubenswrapper[4791]: I0218 00:55:37.867282 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-trusted-ca-bundle\") pod \"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0\" (UID: \"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0\") " Feb 18 00:55:37 crc kubenswrapper[4791]: I0218 00:55:37.868733 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-service-ca" (OuterVolumeSpecName: "service-ca") pod "a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0" (UID: "a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:55:37 crc kubenswrapper[4791]: I0218 00:55:37.869164 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0" (UID: "a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:55:37 crc kubenswrapper[4791]: I0218 00:55:37.869576 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-console-config" (OuterVolumeSpecName: "console-config") pod "a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0" (UID: "a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:55:37 crc kubenswrapper[4791]: I0218 00:55:37.870820 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0" (UID: "a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:37.885418 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0" (UID: "a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:37.893388 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-kube-api-access-cd82k" (OuterVolumeSpecName: "kube-api-access-cd82k") pod "a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0" (UID: "a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0"). InnerVolumeSpecName "kube-api-access-cd82k". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:37.903322 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0" (UID: "a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:37.970491 4791 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-console-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:37.970521 4791 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-console-oauth-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:37.970532 4791 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-console-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:37.970542 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cd82k\" (UniqueName: \"kubernetes.io/projected/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-kube-api-access-cd82k\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:37.970551 4791 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:37.970560 4791 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-service-ca\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:37.970567 4791 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.048044 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-9mfr2"] Feb 18 00:55:38 crc kubenswrapper[4791]: E0218 
00:55:38.048468 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0" containerName="console" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.048481 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0" containerName="console" Feb 18 00:55:38 crc kubenswrapper[4791]: E0218 00:55:38.048497 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="864417f3-df49-4f21-b1a7-8895951b7914" containerName="dnsmasq-dns" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.048503 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="864417f3-df49-4f21-b1a7-8895951b7914" containerName="dnsmasq-dns" Feb 18 00:55:38 crc kubenswrapper[4791]: E0218 00:55:38.048519 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddbbd37f-70d8-40d3-9f79-2c8172c4d589" containerName="swift-ring-rebalance" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.048525 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddbbd37f-70d8-40d3-9f79-2c8172c4d589" containerName="swift-ring-rebalance" Feb 18 00:55:38 crc kubenswrapper[4791]: E0218 00:55:38.048538 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="864417f3-df49-4f21-b1a7-8895951b7914" containerName="init" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.048543 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="864417f3-df49-4f21-b1a7-8895951b7914" containerName="init" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.048741 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0" containerName="console" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.048752 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddbbd37f-70d8-40d3-9f79-2c8172c4d589" containerName="swift-ring-rebalance" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.048765 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="864417f3-df49-4f21-b1a7-8895951b7914" containerName="dnsmasq-dns" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.049413 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-9mfr2" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.073828 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-9mfr2"] Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.163759 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-5db2-account-create-update-kbtp7"] Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.165215 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-5db2-account-create-update-kbtp7" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.169515 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.172129 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-5db2-account-create-update-kbtp7"] Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.176793 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6a71fec1-6980-4269-88ef-bb233a4f35a4-operator-scripts\") pod \"glance-db-create-9mfr2\" (UID: \"6a71fec1-6980-4269-88ef-bb233a4f35a4\") " pod="openstack/glance-db-create-9mfr2" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.176885 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hxxcc\" (UniqueName: \"kubernetes.io/projected/6a71fec1-6980-4269-88ef-bb233a4f35a4-kube-api-access-hxxcc\") pod \"glance-db-create-9mfr2\" (UID: \"6a71fec1-6980-4269-88ef-bb233a4f35a4\") " pod="openstack/glance-db-create-9mfr2" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.278404 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c7641efb-f2fc-47fe-806d-074f2de15773-operator-scripts\") pod \"glance-5db2-account-create-update-kbtp7\" (UID: \"c7641efb-f2fc-47fe-806d-074f2de15773\") " pod="openstack/glance-5db2-account-create-update-kbtp7" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.278480 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hxxcc\" (UniqueName: \"kubernetes.io/projected/6a71fec1-6980-4269-88ef-bb233a4f35a4-kube-api-access-hxxcc\") pod \"glance-db-create-9mfr2\" (UID: \"6a71fec1-6980-4269-88ef-bb233a4f35a4\") " pod="openstack/glance-db-create-9mfr2" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.278592 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dtjml\" (UniqueName: \"kubernetes.io/projected/c7641efb-f2fc-47fe-806d-074f2de15773-kube-api-access-dtjml\") pod \"glance-5db2-account-create-update-kbtp7\" (UID: \"c7641efb-f2fc-47fe-806d-074f2de15773\") " pod="openstack/glance-5db2-account-create-update-kbtp7" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.278698 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6a71fec1-6980-4269-88ef-bb233a4f35a4-operator-scripts\") pod \"glance-db-create-9mfr2\" (UID: \"6a71fec1-6980-4269-88ef-bb233a4f35a4\") " pod="openstack/glance-db-create-9mfr2" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.279558 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6a71fec1-6980-4269-88ef-bb233a4f35a4-operator-scripts\") pod \"glance-db-create-9mfr2\" (UID: \"6a71fec1-6980-4269-88ef-bb233a4f35a4\") " pod="openstack/glance-db-create-9mfr2" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.300267 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hxxcc\" (UniqueName: \"kubernetes.io/projected/6a71fec1-6980-4269-88ef-bb233a4f35a4-kube-api-access-hxxcc\") pod \"glance-db-create-9mfr2\" (UID: 
\"6a71fec1-6980-4269-88ef-bb233a4f35a4\") " pod="openstack/glance-db-create-9mfr2" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.342421 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.380384 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c7641efb-f2fc-47fe-806d-074f2de15773-operator-scripts\") pod \"glance-5db2-account-create-update-kbtp7\" (UID: \"c7641efb-f2fc-47fe-806d-074f2de15773\") " pod="openstack/glance-5db2-account-create-update-kbtp7" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.380507 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dtjml\" (UniqueName: \"kubernetes.io/projected/c7641efb-f2fc-47fe-806d-074f2de15773-kube-api-access-dtjml\") pod \"glance-5db2-account-create-update-kbtp7\" (UID: \"c7641efb-f2fc-47fe-806d-074f2de15773\") " pod="openstack/glance-5db2-account-create-update-kbtp7" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.381305 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c7641efb-f2fc-47fe-806d-074f2de15773-operator-scripts\") pod \"glance-5db2-account-create-update-kbtp7\" (UID: \"c7641efb-f2fc-47fe-806d-074f2de15773\") " pod="openstack/glance-5db2-account-create-update-kbtp7" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.404023 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dtjml\" (UniqueName: \"kubernetes.io/projected/c7641efb-f2fc-47fe-806d-074f2de15773-kube-api-access-dtjml\") pod \"glance-5db2-account-create-update-kbtp7\" (UID: \"c7641efb-f2fc-47fe-806d-074f2de15773\") " pod="openstack/glance-5db2-account-create-update-kbtp7" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.429314 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-9mfr2" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.544460 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-5db2-account-create-update-kbtp7" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.706402 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"4bb5fcf5-6cd5-4569-b788-5740edee3793","Type":"ContainerStarted","Data":"a3a58d7367bdc2390499ae1197865472c365818400bec1809ff6593cd43c9604"} Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.707880 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.710209 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-776c7c9864-7v9mt_a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0/console/0.log" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.710308 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-776c7c9864-7v9mt" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.710323 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-776c7c9864-7v9mt" event={"ID":"a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0","Type":"ContainerDied","Data":"212d19f207b05628a96c239844ddb7a3e0807a834bd2b21fb84adcd528f70724"} Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.710360 4791 scope.go:117] "RemoveContainer" containerID="f198e41ac37ae308322af87d21db9d60e1bd1aa3723a98ee586d6f66d01b3b26" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.732699 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"569159b6-791b-428c-84c7-5387c17a731b","Type":"ContainerStarted","Data":"4f9370cf19730ab31430ae65a300d050aa70f8fd94f8c3de9c40e4279d64d33c"} Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.734620 4791 generic.go:334] "Generic (PLEG): container finished" podID="3c66a47b-ef25-4487-b69c-1a7c7454631d" containerID="57f261c0206fe61ff589d5df0dcb9ea8dd3870246717bbcac307b2d9d1cde62f" exitCode=0 Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.734921 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-cae6-account-create-update-jthwt" event={"ID":"3c66a47b-ef25-4487-b69c-1a7c7454631d","Type":"ContainerDied","Data":"57f261c0206fe61ff589d5df0dcb9ea8dd3870246717bbcac307b2d9d1cde62f"} Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.759416 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=55.759398037 podStartE2EDuration="55.759398037s" podCreationTimestamp="2026-02-18 00:54:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:55:38.743883375 +0000 UTC m=+1280.311896545" watchObservedRunningTime="2026-02-18 00:55:38.759398037 +0000 UTC m=+1280.327411197" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.831044 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-776c7c9864-7v9mt"] Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.861986 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-776c7c9864-7v9mt"] Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.913512 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-5kfk7"] Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.914681 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-5kfk7" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.917827 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-k2mq5" Feb 18 00:55:38 crc kubenswrapper[4791]: I0218 00:55:38.935932 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-5kfk7"] Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.001260 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f09f99ff-9d2b-48c9-96d3-5b39bee97442-operator-scripts\") pod \"f09f99ff-9d2b-48c9-96d3-5b39bee97442\" (UID: \"f09f99ff-9d2b-48c9-96d3-5b39bee97442\") " Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.001681 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pksdn\" (UniqueName: \"kubernetes.io/projected/f09f99ff-9d2b-48c9-96d3-5b39bee97442-kube-api-access-pksdn\") pod \"f09f99ff-9d2b-48c9-96d3-5b39bee97442\" (UID: \"f09f99ff-9d2b-48c9-96d3-5b39bee97442\") " Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.002080 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d6pg4\" (UniqueName: \"kubernetes.io/projected/307ef073-8e53-4545-9da7-88c82a038dd0-kube-api-access-d6pg4\") pod \"keystone-db-create-5kfk7\" (UID: \"307ef073-8e53-4545-9da7-88c82a038dd0\") " pod="openstack/keystone-db-create-5kfk7" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.002123 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/307ef073-8e53-4545-9da7-88c82a038dd0-operator-scripts\") pod \"keystone-db-create-5kfk7\" (UID: \"307ef073-8e53-4545-9da7-88c82a038dd0\") " pod="openstack/keystone-db-create-5kfk7" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.002650 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f09f99ff-9d2b-48c9-96d3-5b39bee97442-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f09f99ff-9d2b-48c9-96d3-5b39bee97442" (UID: "f09f99ff-9d2b-48c9-96d3-5b39bee97442"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.013104 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f09f99ff-9d2b-48c9-96d3-5b39bee97442-kube-api-access-pksdn" (OuterVolumeSpecName: "kube-api-access-pksdn") pod "f09f99ff-9d2b-48c9-96d3-5b39bee97442" (UID: "f09f99ff-9d2b-48c9-96d3-5b39bee97442"). InnerVolumeSpecName "kube-api-access-pksdn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.110521 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d6pg4\" (UniqueName: \"kubernetes.io/projected/307ef073-8e53-4545-9da7-88c82a038dd0-kube-api-access-d6pg4\") pod \"keystone-db-create-5kfk7\" (UID: \"307ef073-8e53-4545-9da7-88c82a038dd0\") " pod="openstack/keystone-db-create-5kfk7" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.110768 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/307ef073-8e53-4545-9da7-88c82a038dd0-operator-scripts\") pod \"keystone-db-create-5kfk7\" (UID: \"307ef073-8e53-4545-9da7-88c82a038dd0\") " pod="openstack/keystone-db-create-5kfk7" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.110940 4791 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f09f99ff-9d2b-48c9-96d3-5b39bee97442-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.110993 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pksdn\" (UniqueName: \"kubernetes.io/projected/f09f99ff-9d2b-48c9-96d3-5b39bee97442-kube-api-access-pksdn\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.111642 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/307ef073-8e53-4545-9da7-88c82a038dd0-operator-scripts\") pod \"keystone-db-create-5kfk7\" (UID: \"307ef073-8e53-4545-9da7-88c82a038dd0\") " pod="openstack/keystone-db-create-5kfk7" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.136709 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d6pg4\" (UniqueName: \"kubernetes.io/projected/307ef073-8e53-4545-9da7-88c82a038dd0-kube-api-access-d6pg4\") pod \"keystone-db-create-5kfk7\" (UID: \"307ef073-8e53-4545-9da7-88c82a038dd0\") " pod="openstack/keystone-db-create-5kfk7" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.141611 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0" path="/var/lib/kubelet/pods/a48d5f86-6d9c-4d1f-bef3-dd97b8bf44a0/volumes" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.142510 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bd9d-account-create-update-6nfrs"] Feb 18 00:55:39 crc kubenswrapper[4791]: E0218 00:55:39.143140 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f09f99ff-9d2b-48c9-96d3-5b39bee97442" containerName="mariadb-account-create-update" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.143234 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="f09f99ff-9d2b-48c9-96d3-5b39bee97442" containerName="mariadb-account-create-update" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.143567 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="f09f99ff-9d2b-48c9-96d3-5b39bee97442" containerName="mariadb-account-create-update" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.144466 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bd9d-account-create-update-6nfrs"] Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.144587 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bd9d-account-create-update-6nfrs" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.146936 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.215268 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/61227f77-9a0c-48f9-8dda-9fa75c00b71a-operator-scripts\") pod \"keystone-bd9d-account-create-update-6nfrs\" (UID: \"61227f77-9a0c-48f9-8dda-9fa75c00b71a\") " pod="openstack/keystone-bd9d-account-create-update-6nfrs" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.216375 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j555m\" (UniqueName: \"kubernetes.io/projected/61227f77-9a0c-48f9-8dda-9fa75c00b71a-kube-api-access-j555m\") pod \"keystone-bd9d-account-create-update-6nfrs\" (UID: \"61227f77-9a0c-48f9-8dda-9fa75c00b71a\") " pod="openstack/keystone-bd9d-account-create-update-6nfrs" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.246461 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-5kfk7" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.324076 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j555m\" (UniqueName: \"kubernetes.io/projected/61227f77-9a0c-48f9-8dda-9fa75c00b71a-kube-api-access-j555m\") pod \"keystone-bd9d-account-create-update-6nfrs\" (UID: \"61227f77-9a0c-48f9-8dda-9fa75c00b71a\") " pod="openstack/keystone-bd9d-account-create-update-6nfrs" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.331606 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/61227f77-9a0c-48f9-8dda-9fa75c00b71a-operator-scripts\") pod \"keystone-bd9d-account-create-update-6nfrs\" (UID: \"61227f77-9a0c-48f9-8dda-9fa75c00b71a\") " pod="openstack/keystone-bd9d-account-create-update-6nfrs" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.332667 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/61227f77-9a0c-48f9-8dda-9fa75c00b71a-operator-scripts\") pod \"keystone-bd9d-account-create-update-6nfrs\" (UID: \"61227f77-9a0c-48f9-8dda-9fa75c00b71a\") " pod="openstack/keystone-bd9d-account-create-update-6nfrs" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.339440 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-x8dzl"] Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.341033 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-x8dzl" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.349998 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-e552-account-create-update-lbd8g"] Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.351513 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-e552-account-create-update-lbd8g" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.360894 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.370560 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-x8dzl"] Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.372426 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j555m\" (UniqueName: \"kubernetes.io/projected/61227f77-9a0c-48f9-8dda-9fa75c00b71a-kube-api-access-j555m\") pod \"keystone-bd9d-account-create-update-6nfrs\" (UID: \"61227f77-9a0c-48f9-8dda-9fa75c00b71a\") " pod="openstack/keystone-bd9d-account-create-update-6nfrs" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.386297 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-e552-account-create-update-lbd8g"] Feb 18 00:55:39 crc kubenswrapper[4791]: W0218 00:55:39.395930 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6a71fec1_6980_4269_88ef_bb233a4f35a4.slice/crio-e88ed2d6200873da294241fc2886b857f2c4329b0aea0f1bb6a53a23d2af5af7 WatchSource:0}: Error finding container e88ed2d6200873da294241fc2886b857f2c4329b0aea0f1bb6a53a23d2af5af7: Status 404 returned error can't find the container with id e88ed2d6200873da294241fc2886b857f2c4329b0aea0f1bb6a53a23d2af5af7 Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.396591 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-5db2-account-create-update-kbtp7"] Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.435379 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rjcr6\" (UniqueName: \"kubernetes.io/projected/f40402ed-56dc-452a-9fc4-46008591a6ab-kube-api-access-rjcr6\") pod \"placement-db-create-x8dzl\" (UID: \"f40402ed-56dc-452a-9fc4-46008591a6ab\") " pod="openstack/placement-db-create-x8dzl" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.435451 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f40402ed-56dc-452a-9fc4-46008591a6ab-operator-scripts\") pod \"placement-db-create-x8dzl\" (UID: \"f40402ed-56dc-452a-9fc4-46008591a6ab\") " pod="openstack/placement-db-create-x8dzl" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.435492 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1568271d-f2b8-4f5a-abf8-cd9ea1e6750b-operator-scripts\") pod \"placement-e552-account-create-update-lbd8g\" (UID: \"1568271d-f2b8-4f5a-abf8-cd9ea1e6750b\") " pod="openstack/placement-e552-account-create-update-lbd8g" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.435524 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9nm2v\" (UniqueName: \"kubernetes.io/projected/1568271d-f2b8-4f5a-abf8-cd9ea1e6750b-kube-api-access-9nm2v\") pod \"placement-e552-account-create-update-lbd8g\" (UID: \"1568271d-f2b8-4f5a-abf8-cd9ea1e6750b\") " pod="openstack/placement-e552-account-create-update-lbd8g" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.442354 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/glance-db-create-9mfr2"] Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.517755 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bd9d-account-create-update-6nfrs" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.537553 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rjcr6\" (UniqueName: \"kubernetes.io/projected/f40402ed-56dc-452a-9fc4-46008591a6ab-kube-api-access-rjcr6\") pod \"placement-db-create-x8dzl\" (UID: \"f40402ed-56dc-452a-9fc4-46008591a6ab\") " pod="openstack/placement-db-create-x8dzl" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.537609 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f40402ed-56dc-452a-9fc4-46008591a6ab-operator-scripts\") pod \"placement-db-create-x8dzl\" (UID: \"f40402ed-56dc-452a-9fc4-46008591a6ab\") " pod="openstack/placement-db-create-x8dzl" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.537639 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1568271d-f2b8-4f5a-abf8-cd9ea1e6750b-operator-scripts\") pod \"placement-e552-account-create-update-lbd8g\" (UID: \"1568271d-f2b8-4f5a-abf8-cd9ea1e6750b\") " pod="openstack/placement-e552-account-create-update-lbd8g" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.537687 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9nm2v\" (UniqueName: \"kubernetes.io/projected/1568271d-f2b8-4f5a-abf8-cd9ea1e6750b-kube-api-access-9nm2v\") pod \"placement-e552-account-create-update-lbd8g\" (UID: \"1568271d-f2b8-4f5a-abf8-cd9ea1e6750b\") " pod="openstack/placement-e552-account-create-update-lbd8g" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.539143 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f40402ed-56dc-452a-9fc4-46008591a6ab-operator-scripts\") pod \"placement-db-create-x8dzl\" (UID: \"f40402ed-56dc-452a-9fc4-46008591a6ab\") " pod="openstack/placement-db-create-x8dzl" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.539516 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1568271d-f2b8-4f5a-abf8-cd9ea1e6750b-operator-scripts\") pod \"placement-e552-account-create-update-lbd8g\" (UID: \"1568271d-f2b8-4f5a-abf8-cd9ea1e6750b\") " pod="openstack/placement-e552-account-create-update-lbd8g" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.551868 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-nw6ql" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.589630 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9nm2v\" (UniqueName: \"kubernetes.io/projected/1568271d-f2b8-4f5a-abf8-cd9ea1e6750b-kube-api-access-9nm2v\") pod \"placement-e552-account-create-update-lbd8g\" (UID: \"1568271d-f2b8-4f5a-abf8-cd9ea1e6750b\") " pod="openstack/placement-e552-account-create-update-lbd8g" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.598879 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rjcr6\" (UniqueName: \"kubernetes.io/projected/f40402ed-56dc-452a-9fc4-46008591a6ab-kube-api-access-rjcr6\") pod \"placement-db-create-x8dzl\" (UID: \"f40402ed-56dc-452a-9fc4-46008591a6ab\") " pod="openstack/placement-db-create-x8dzl" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.639066 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ba41ccfb-9ea8-47e2-a814-d958e7ced77f-operator-scripts\") pod \"ba41ccfb-9ea8-47e2-a814-d958e7ced77f\" (UID: \"ba41ccfb-9ea8-47e2-a814-d958e7ced77f\") " Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.639148 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cnpdn\" (UniqueName: \"kubernetes.io/projected/ba41ccfb-9ea8-47e2-a814-d958e7ced77f-kube-api-access-cnpdn\") pod \"ba41ccfb-9ea8-47e2-a814-d958e7ced77f\" (UID: \"ba41ccfb-9ea8-47e2-a814-d958e7ced77f\") " Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.640011 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ba41ccfb-9ea8-47e2-a814-d958e7ced77f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ba41ccfb-9ea8-47e2-a814-d958e7ced77f" (UID: "ba41ccfb-9ea8-47e2-a814-d958e7ced77f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.652619 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba41ccfb-9ea8-47e2-a814-d958e7ced77f-kube-api-access-cnpdn" (OuterVolumeSpecName: "kube-api-access-cnpdn") pod "ba41ccfb-9ea8-47e2-a814-d958e7ced77f" (UID: "ba41ccfb-9ea8-47e2-a814-d958e7ced77f"). InnerVolumeSpecName "kube-api-access-cnpdn". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.666401 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-x8dzl" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.727673 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-e552-account-create-update-lbd8g" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.741308 4791 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ba41ccfb-9ea8-47e2-a814-d958e7ced77f-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.741337 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cnpdn\" (UniqueName: \"kubernetes.io/projected/ba41ccfb-9ea8-47e2-a814-d958e7ced77f-kube-api-access-cnpdn\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.760723 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-9mfr2" event={"ID":"6a71fec1-6980-4269-88ef-bb233a4f35a4","Type":"ContainerStarted","Data":"04c44a90792f718817da2cdadbb0cff6d9972a8c74cf719e02d706058a86a9b3"} Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.760760 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-9mfr2" event={"ID":"6a71fec1-6980-4269-88ef-bb233a4f35a4","Type":"ContainerStarted","Data":"e88ed2d6200873da294241fc2886b857f2c4329b0aea0f1bb6a53a23d2af5af7"} Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.764427 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-nw6ql" event={"ID":"ba41ccfb-9ea8-47e2-a814-d958e7ced77f","Type":"ContainerDied","Data":"0af1bbdd36ef48f341f16d8874679894e4f9b3d1fba208b0b1751934e3e4326e"} Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.764462 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0af1bbdd36ef48f341f16d8874679894e4f9b3d1fba208b0b1751934e3e4326e" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.764523 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-nw6ql" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.770191 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-k2mq5" event={"ID":"f09f99ff-9d2b-48c9-96d3-5b39bee97442","Type":"ContainerDied","Data":"6f9274f4739974401e0de29dd7074a3a77151843531b770c57704c7bebba8591"} Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.770226 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6f9274f4739974401e0de29dd7074a3a77151843531b770c57704c7bebba8591" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.770300 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-k2mq5" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.774413 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-5db2-account-create-update-kbtp7" event={"ID":"c7641efb-f2fc-47fe-806d-074f2de15773","Type":"ContainerStarted","Data":"6aff4f3a75b912faf1517c2040a1ceda1b60cdb7a5139050118a4fb810dec87f"} Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.774440 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-5db2-account-create-update-kbtp7" event={"ID":"c7641efb-f2fc-47fe-806d-074f2de15773","Type":"ContainerStarted","Data":"27b049097bb450bb8010e132d3388281df6ef888bd52dffc91f0090c61fff4eb"} Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.782842 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-create-9mfr2" podStartSLOduration=1.782826269 podStartE2EDuration="1.782826269s" podCreationTimestamp="2026-02-18 00:55:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:55:39.778610087 +0000 UTC m=+1281.346623257" watchObservedRunningTime="2026-02-18 00:55:39.782826269 +0000 UTC m=+1281.350839429" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.807668 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-5db2-account-create-update-kbtp7" podStartSLOduration=1.807652381 podStartE2EDuration="1.807652381s" podCreationTimestamp="2026-02-18 00:55:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:55:39.801231171 +0000 UTC m=+1281.369244341" watchObservedRunningTime="2026-02-18 00:55:39.807652381 +0000 UTC m=+1281.375665551" Feb 18 00:55:39 crc kubenswrapper[4791]: I0218 00:55:39.939721 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-5kfk7"] Feb 18 00:55:40 crc kubenswrapper[4791]: I0218 00:55:40.278856 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-cae6-account-create-update-jthwt" Feb 18 00:55:40 crc kubenswrapper[4791]: I0218 00:55:40.356350 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2rnrv\" (UniqueName: \"kubernetes.io/projected/3c66a47b-ef25-4487-b69c-1a7c7454631d-kube-api-access-2rnrv\") pod \"3c66a47b-ef25-4487-b69c-1a7c7454631d\" (UID: \"3c66a47b-ef25-4487-b69c-1a7c7454631d\") " Feb 18 00:55:40 crc kubenswrapper[4791]: I0218 00:55:40.356748 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c66a47b-ef25-4487-b69c-1a7c7454631d-operator-scripts\") pod \"3c66a47b-ef25-4487-b69c-1a7c7454631d\" (UID: \"3c66a47b-ef25-4487-b69c-1a7c7454631d\") " Feb 18 00:55:40 crc kubenswrapper[4791]: I0218 00:55:40.357896 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c66a47b-ef25-4487-b69c-1a7c7454631d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3c66a47b-ef25-4487-b69c-1a7c7454631d" (UID: "3c66a47b-ef25-4487-b69c-1a7c7454631d"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:55:40 crc kubenswrapper[4791]: I0218 00:55:40.363147 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c66a47b-ef25-4487-b69c-1a7c7454631d-kube-api-access-2rnrv" (OuterVolumeSpecName: "kube-api-access-2rnrv") pod "3c66a47b-ef25-4487-b69c-1a7c7454631d" (UID: "3c66a47b-ef25-4487-b69c-1a7c7454631d"). InnerVolumeSpecName "kube-api-access-2rnrv". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:55:40 crc kubenswrapper[4791]: I0218 00:55:40.461613 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2rnrv\" (UniqueName: \"kubernetes.io/projected/3c66a47b-ef25-4487-b69c-1a7c7454631d-kube-api-access-2rnrv\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:40 crc kubenswrapper[4791]: I0218 00:55:40.461646 4791 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c66a47b-ef25-4487-b69c-1a7c7454631d-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:40 crc kubenswrapper[4791]: I0218 00:55:40.729393 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bd9d-account-create-update-6nfrs"] Feb 18 00:55:40 crc kubenswrapper[4791]: W0218 00:55:40.754649 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod61227f77_9a0c_48f9_8dda_9fa75c00b71a.slice/crio-75ec725ea565e4f3c0781338505f0567c99fafbe9824cdc983e63f3cc394ba37 WatchSource:0}: Error finding container 75ec725ea565e4f3c0781338505f0567c99fafbe9824cdc983e63f3cc394ba37: Status 404 returned error can't find the container with id 75ec725ea565e4f3c0781338505f0567c99fafbe9824cdc983e63f3cc394ba37 Feb 18 00:55:40 crc kubenswrapper[4791]: I0218 00:55:40.793110 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"569159b6-791b-428c-84c7-5387c17a731b","Type":"ContainerStarted","Data":"b8a1fd406d175a17ab46d11bebc38399457cdee865f7f027682aeee0ea8bc4bb"} Feb 18 00:55:40 crc kubenswrapper[4791]: I0218 00:55:40.798044 4791 generic.go:334] "Generic (PLEG): container finished" podID="c7641efb-f2fc-47fe-806d-074f2de15773" containerID="6aff4f3a75b912faf1517c2040a1ceda1b60cdb7a5139050118a4fb810dec87f" exitCode=0 Feb 18 00:55:40 crc kubenswrapper[4791]: I0218 00:55:40.798087 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-5db2-account-create-update-kbtp7" event={"ID":"c7641efb-f2fc-47fe-806d-074f2de15773","Type":"ContainerDied","Data":"6aff4f3a75b912faf1517c2040a1ceda1b60cdb7a5139050118a4fb810dec87f"} Feb 18 00:55:40 crc kubenswrapper[4791]: I0218 00:55:40.818363 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-cae6-account-create-update-jthwt" Feb 18 00:55:40 crc kubenswrapper[4791]: I0218 00:55:40.818353 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-cae6-account-create-update-jthwt" event={"ID":"3c66a47b-ef25-4487-b69c-1a7c7454631d","Type":"ContainerDied","Data":"9de93ddf07dd254d7ffe143159c021e5a8e154803c4440ab298c905d165040bc"} Feb 18 00:55:40 crc kubenswrapper[4791]: I0218 00:55:40.818475 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9de93ddf07dd254d7ffe143159c021e5a8e154803c4440ab298c905d165040bc" Feb 18 00:55:40 crc kubenswrapper[4791]: I0218 00:55:40.851850 4791 generic.go:334] "Generic (PLEG): container finished" podID="6a71fec1-6980-4269-88ef-bb233a4f35a4" containerID="04c44a90792f718817da2cdadbb0cff6d9972a8c74cf719e02d706058a86a9b3" exitCode=0 Feb 18 00:55:40 crc kubenswrapper[4791]: I0218 00:55:40.852050 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-9mfr2" event={"ID":"6a71fec1-6980-4269-88ef-bb233a4f35a4","Type":"ContainerDied","Data":"04c44a90792f718817da2cdadbb0cff6d9972a8c74cf719e02d706058a86a9b3"} Feb 18 00:55:40 crc kubenswrapper[4791]: I0218 00:55:40.869366 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-5kfk7" event={"ID":"307ef073-8e53-4545-9da7-88c82a038dd0","Type":"ContainerStarted","Data":"c87acc8bd3df9976cb5b04770c7953204386cfd101eee67fcc4f5b2a4a08cf2d"} Feb 18 00:55:40 crc kubenswrapper[4791]: I0218 00:55:40.869428 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-5kfk7" event={"ID":"307ef073-8e53-4545-9da7-88c82a038dd0","Type":"ContainerStarted","Data":"7619e5e0e0ecc01cd47d792e2435244e75ba175f3d1dd28a532d1c2bd9ba3212"} Feb 18 00:55:40 crc kubenswrapper[4791]: I0218 00:55:40.875676 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bd9d-account-create-update-6nfrs" event={"ID":"61227f77-9a0c-48f9-8dda-9fa75c00b71a","Type":"ContainerStarted","Data":"75ec725ea565e4f3c0781338505f0567c99fafbe9824cdc983e63f3cc394ba37"} Feb 18 00:55:40 crc kubenswrapper[4791]: I0218 00:55:40.914215 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-x8dzl"] Feb 18 00:55:40 crc kubenswrapper[4791]: I0218 00:55:40.929508 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-create-5kfk7" podStartSLOduration=2.929485642 podStartE2EDuration="2.929485642s" podCreationTimestamp="2026-02-18 00:55:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:55:40.897515788 +0000 UTC m=+1282.465528959" watchObservedRunningTime="2026-02-18 00:55:40.929485642 +0000 UTC m=+1282.497498812" Feb 18 00:55:41 crc kubenswrapper[4791]: I0218 00:55:41.046058 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-e552-account-create-update-lbd8g"] Feb 18 00:55:41 crc kubenswrapper[4791]: I0218 00:55:41.191213 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-k2mq5"] Feb 18 00:55:41 crc kubenswrapper[4791]: I0218 00:55:41.203963 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-k2mq5"] Feb 18 00:55:41 crc kubenswrapper[4791]: I0218 00:55:41.893342 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"569159b6-791b-428c-84c7-5387c17a731b","Type":"ContainerStarted","Data":"9352f65cc162017a22955e60d8730228e98061cf1d331746b5e656912fa32f67"} Feb 18 00:55:41 crc kubenswrapper[4791]: I0218 00:55:41.893667 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"569159b6-791b-428c-84c7-5387c17a731b","Type":"ContainerStarted","Data":"7486ed0bd62724196d43826c99b35a9341eeb3ff5f3780f6fa6be0c7a8ebaef4"} Feb 18 00:55:41 crc kubenswrapper[4791]: I0218 00:55:41.895618 4791 generic.go:334] "Generic (PLEG): container finished" podID="f40402ed-56dc-452a-9fc4-46008591a6ab" containerID="185b5dd3d2ace79e2994d44192f4e52e6d0979939737042d25b813e126de9f31" exitCode=0 Feb 18 00:55:41 crc kubenswrapper[4791]: I0218 00:55:41.895729 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-x8dzl" event={"ID":"f40402ed-56dc-452a-9fc4-46008591a6ab","Type":"ContainerDied","Data":"185b5dd3d2ace79e2994d44192f4e52e6d0979939737042d25b813e126de9f31"} Feb 18 00:55:41 crc kubenswrapper[4791]: I0218 00:55:41.895761 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-x8dzl" event={"ID":"f40402ed-56dc-452a-9fc4-46008591a6ab","Type":"ContainerStarted","Data":"30f8776711cafc7eebf0f6f363c5b86b59cb8de359dfeb3f7e54b78f3ab5675f"} Feb 18 00:55:41 crc kubenswrapper[4791]: I0218 00:55:41.897397 4791 generic.go:334] "Generic (PLEG): container finished" podID="307ef073-8e53-4545-9da7-88c82a038dd0" containerID="c87acc8bd3df9976cb5b04770c7953204386cfd101eee67fcc4f5b2a4a08cf2d" exitCode=0 Feb 18 00:55:41 crc kubenswrapper[4791]: I0218 00:55:41.897811 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-5kfk7" event={"ID":"307ef073-8e53-4545-9da7-88c82a038dd0","Type":"ContainerDied","Data":"c87acc8bd3df9976cb5b04770c7953204386cfd101eee67fcc4f5b2a4a08cf2d"} Feb 18 00:55:41 crc kubenswrapper[4791]: I0218 00:55:41.902575 4791 generic.go:334] "Generic (PLEG): container finished" podID="61227f77-9a0c-48f9-8dda-9fa75c00b71a" containerID="1f5c8685a3c26562ce8e876c68918acf9268d83c71bd267cf21c31058d4b8d42" exitCode=0 Feb 18 00:55:41 crc kubenswrapper[4791]: I0218 00:55:41.902644 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bd9d-account-create-update-6nfrs" event={"ID":"61227f77-9a0c-48f9-8dda-9fa75c00b71a","Type":"ContainerDied","Data":"1f5c8685a3c26562ce8e876c68918acf9268d83c71bd267cf21c31058d4b8d42"} Feb 18 00:55:41 crc kubenswrapper[4791]: I0218 00:55:41.908508 4791 generic.go:334] "Generic (PLEG): container finished" podID="1568271d-f2b8-4f5a-abf8-cd9ea1e6750b" containerID="3cdd0d549303a2310267b619bffd2908656f73a39776ad6804cf3aba5a74a45b" exitCode=0 Feb 18 00:55:41 crc kubenswrapper[4791]: I0218 00:55:41.908691 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-e552-account-create-update-lbd8g" event={"ID":"1568271d-f2b8-4f5a-abf8-cd9ea1e6750b","Type":"ContainerDied","Data":"3cdd0d549303a2310267b619bffd2908656f73a39776ad6804cf3aba5a74a45b"} Feb 18 00:55:41 crc kubenswrapper[4791]: I0218 00:55:41.908756 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-e552-account-create-update-lbd8g" event={"ID":"1568271d-f2b8-4f5a-abf8-cd9ea1e6750b","Type":"ContainerStarted","Data":"7479cb92fde269612c90de6088231122548dd05e88f73b24fc2c5314dd9047d6"} Feb 18 00:55:42 crc kubenswrapper[4791]: I0218 00:55:42.056908 4791 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 
18 00:55:42 crc kubenswrapper[4791]: I0218 00:55:42.422488 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-5db2-account-create-update-kbtp7" Feb 18 00:55:42 crc kubenswrapper[4791]: I0218 00:55:42.431630 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-9mfr2" Feb 18 00:55:42 crc kubenswrapper[4791]: I0218 00:55:42.507086 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hxxcc\" (UniqueName: \"kubernetes.io/projected/6a71fec1-6980-4269-88ef-bb233a4f35a4-kube-api-access-hxxcc\") pod \"6a71fec1-6980-4269-88ef-bb233a4f35a4\" (UID: \"6a71fec1-6980-4269-88ef-bb233a4f35a4\") " Feb 18 00:55:42 crc kubenswrapper[4791]: I0218 00:55:42.507346 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c7641efb-f2fc-47fe-806d-074f2de15773-operator-scripts\") pod \"c7641efb-f2fc-47fe-806d-074f2de15773\" (UID: \"c7641efb-f2fc-47fe-806d-074f2de15773\") " Feb 18 00:55:42 crc kubenswrapper[4791]: I0218 00:55:42.507461 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6a71fec1-6980-4269-88ef-bb233a4f35a4-operator-scripts\") pod \"6a71fec1-6980-4269-88ef-bb233a4f35a4\" (UID: \"6a71fec1-6980-4269-88ef-bb233a4f35a4\") " Feb 18 00:55:42 crc kubenswrapper[4791]: I0218 00:55:42.507582 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dtjml\" (UniqueName: \"kubernetes.io/projected/c7641efb-f2fc-47fe-806d-074f2de15773-kube-api-access-dtjml\") pod \"c7641efb-f2fc-47fe-806d-074f2de15773\" (UID: \"c7641efb-f2fc-47fe-806d-074f2de15773\") " Feb 18 00:55:42 crc kubenswrapper[4791]: I0218 00:55:42.509404 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c7641efb-f2fc-47fe-806d-074f2de15773-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c7641efb-f2fc-47fe-806d-074f2de15773" (UID: "c7641efb-f2fc-47fe-806d-074f2de15773"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:55:42 crc kubenswrapper[4791]: I0218 00:55:42.511781 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a71fec1-6980-4269-88ef-bb233a4f35a4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6a71fec1-6980-4269-88ef-bb233a4f35a4" (UID: "6a71fec1-6980-4269-88ef-bb233a4f35a4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:55:42 crc kubenswrapper[4791]: I0218 00:55:42.513556 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7641efb-f2fc-47fe-806d-074f2de15773-kube-api-access-dtjml" (OuterVolumeSpecName: "kube-api-access-dtjml") pod "c7641efb-f2fc-47fe-806d-074f2de15773" (UID: "c7641efb-f2fc-47fe-806d-074f2de15773"). InnerVolumeSpecName "kube-api-access-dtjml". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:55:42 crc kubenswrapper[4791]: I0218 00:55:42.516301 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a71fec1-6980-4269-88ef-bb233a4f35a4-kube-api-access-hxxcc" (OuterVolumeSpecName: "kube-api-access-hxxcc") pod "6a71fec1-6980-4269-88ef-bb233a4f35a4" (UID: "6a71fec1-6980-4269-88ef-bb233a4f35a4"). 
InnerVolumeSpecName "kube-api-access-hxxcc". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:55:42 crc kubenswrapper[4791]: I0218 00:55:42.610793 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dtjml\" (UniqueName: \"kubernetes.io/projected/c7641efb-f2fc-47fe-806d-074f2de15773-kube-api-access-dtjml\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:42 crc kubenswrapper[4791]: I0218 00:55:42.610826 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hxxcc\" (UniqueName: \"kubernetes.io/projected/6a71fec1-6980-4269-88ef-bb233a4f35a4-kube-api-access-hxxcc\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:42 crc kubenswrapper[4791]: I0218 00:55:42.610840 4791 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c7641efb-f2fc-47fe-806d-074f2de15773-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:42 crc kubenswrapper[4791]: I0218 00:55:42.610851 4791 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6a71fec1-6980-4269-88ef-bb233a4f35a4-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:42 crc kubenswrapper[4791]: I0218 00:55:42.918452 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"569159b6-791b-428c-84c7-5387c17a731b","Type":"ContainerStarted","Data":"41d94d6649be378f74e18bc15fb069cab9c0cd9d765a338f848440714e11af6c"} Feb 18 00:55:42 crc kubenswrapper[4791]: I0218 00:55:42.921491 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-5db2-account-create-update-kbtp7" Feb 18 00:55:42 crc kubenswrapper[4791]: I0218 00:55:42.921483 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-5db2-account-create-update-kbtp7" event={"ID":"c7641efb-f2fc-47fe-806d-074f2de15773","Type":"ContainerDied","Data":"27b049097bb450bb8010e132d3388281df6ef888bd52dffc91f0090c61fff4eb"} Feb 18 00:55:42 crc kubenswrapper[4791]: I0218 00:55:42.921632 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="27b049097bb450bb8010e132d3388281df6ef888bd52dffc91f0090c61fff4eb" Feb 18 00:55:42 crc kubenswrapper[4791]: I0218 00:55:42.923646 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-9mfr2" event={"ID":"6a71fec1-6980-4269-88ef-bb233a4f35a4","Type":"ContainerDied","Data":"e88ed2d6200873da294241fc2886b857f2c4329b0aea0f1bb6a53a23d2af5af7"} Feb 18 00:55:42 crc kubenswrapper[4791]: I0218 00:55:42.923700 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e88ed2d6200873da294241fc2886b857f2c4329b0aea0f1bb6a53a23d2af5af7" Feb 18 00:55:42 crc kubenswrapper[4791]: I0218 00:55:42.923698 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-9mfr2" Feb 18 00:55:43 crc kubenswrapper[4791]: I0218 00:55:43.074938 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f09f99ff-9d2b-48c9-96d3-5b39bee97442" path="/var/lib/kubelet/pods/f09f99ff-9d2b-48c9-96d3-5b39bee97442/volumes" Feb 18 00:55:43 crc kubenswrapper[4791]: I0218 00:55:43.080555 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-5shlq" podUID="54deb01e-caa1-4fe0-8bd0-d412c4d73210" containerName="ovn-controller" probeResult="failure" output=< Feb 18 00:55:43 crc kubenswrapper[4791]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Feb 18 00:55:43 crc kubenswrapper[4791]: > Feb 18 00:55:44 crc kubenswrapper[4791]: I0218 00:55:44.952174 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-2vccl"] Feb 18 00:55:44 crc kubenswrapper[4791]: E0218 00:55:44.953179 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7641efb-f2fc-47fe-806d-074f2de15773" containerName="mariadb-account-create-update" Feb 18 00:55:44 crc kubenswrapper[4791]: I0218 00:55:44.953193 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7641efb-f2fc-47fe-806d-074f2de15773" containerName="mariadb-account-create-update" Feb 18 00:55:44 crc kubenswrapper[4791]: E0218 00:55:44.953205 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a71fec1-6980-4269-88ef-bb233a4f35a4" containerName="mariadb-database-create" Feb 18 00:55:44 crc kubenswrapper[4791]: I0218 00:55:44.953211 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a71fec1-6980-4269-88ef-bb233a4f35a4" containerName="mariadb-database-create" Feb 18 00:55:44 crc kubenswrapper[4791]: E0218 00:55:44.953234 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c66a47b-ef25-4487-b69c-1a7c7454631d" containerName="mariadb-account-create-update" Feb 18 00:55:44 crc kubenswrapper[4791]: I0218 00:55:44.953242 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c66a47b-ef25-4487-b69c-1a7c7454631d" containerName="mariadb-account-create-update" Feb 18 00:55:44 crc kubenswrapper[4791]: E0218 00:55:44.953262 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba41ccfb-9ea8-47e2-a814-d958e7ced77f" containerName="mariadb-database-create" Feb 18 00:55:44 crc kubenswrapper[4791]: I0218 00:55:44.953267 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba41ccfb-9ea8-47e2-a814-d958e7ced77f" containerName="mariadb-database-create" Feb 18 00:55:44 crc kubenswrapper[4791]: I0218 00:55:44.953593 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c66a47b-ef25-4487-b69c-1a7c7454631d" containerName="mariadb-account-create-update" Feb 18 00:55:44 crc kubenswrapper[4791]: I0218 00:55:44.953611 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7641efb-f2fc-47fe-806d-074f2de15773" containerName="mariadb-account-create-update" Feb 18 00:55:44 crc kubenswrapper[4791]: I0218 00:55:44.953629 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba41ccfb-9ea8-47e2-a814-d958e7ced77f" containerName="mariadb-database-create" Feb 18 00:55:44 crc kubenswrapper[4791]: I0218 00:55:44.953642 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a71fec1-6980-4269-88ef-bb233a4f35a4" containerName="mariadb-database-create" Feb 18 00:55:44 crc kubenswrapper[4791]: I0218 00:55:44.954329 4791 util.go:30] "No sandbox for pod can 
be found. Need to start a new one" pod="openstack/root-account-create-update-2vccl" Feb 18 00:55:44 crc kubenswrapper[4791]: I0218 00:55:44.956819 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Feb 18 00:55:44 crc kubenswrapper[4791]: I0218 00:55:44.971658 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-2vccl"] Feb 18 00:55:45 crc kubenswrapper[4791]: I0218 00:55:45.077995 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5nvg4\" (UniqueName: \"kubernetes.io/projected/01db7dd2-a23a-4ba0-8355-9175499d6658-kube-api-access-5nvg4\") pod \"root-account-create-update-2vccl\" (UID: \"01db7dd2-a23a-4ba0-8355-9175499d6658\") " pod="openstack/root-account-create-update-2vccl" Feb 18 00:55:45 crc kubenswrapper[4791]: I0218 00:55:45.078220 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/01db7dd2-a23a-4ba0-8355-9175499d6658-operator-scripts\") pod \"root-account-create-update-2vccl\" (UID: \"01db7dd2-a23a-4ba0-8355-9175499d6658\") " pod="openstack/root-account-create-update-2vccl" Feb 18 00:55:45 crc kubenswrapper[4791]: I0218 00:55:45.179454 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/01db7dd2-a23a-4ba0-8355-9175499d6658-operator-scripts\") pod \"root-account-create-update-2vccl\" (UID: \"01db7dd2-a23a-4ba0-8355-9175499d6658\") " pod="openstack/root-account-create-update-2vccl" Feb 18 00:55:45 crc kubenswrapper[4791]: I0218 00:55:45.179576 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5nvg4\" (UniqueName: \"kubernetes.io/projected/01db7dd2-a23a-4ba0-8355-9175499d6658-kube-api-access-5nvg4\") pod \"root-account-create-update-2vccl\" (UID: \"01db7dd2-a23a-4ba0-8355-9175499d6658\") " pod="openstack/root-account-create-update-2vccl" Feb 18 00:55:45 crc kubenswrapper[4791]: I0218 00:55:45.180563 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/01db7dd2-a23a-4ba0-8355-9175499d6658-operator-scripts\") pod \"root-account-create-update-2vccl\" (UID: \"01db7dd2-a23a-4ba0-8355-9175499d6658\") " pod="openstack/root-account-create-update-2vccl" Feb 18 00:55:45 crc kubenswrapper[4791]: I0218 00:55:45.201393 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5nvg4\" (UniqueName: \"kubernetes.io/projected/01db7dd2-a23a-4ba0-8355-9175499d6658-kube-api-access-5nvg4\") pod \"root-account-create-update-2vccl\" (UID: \"01db7dd2-a23a-4ba0-8355-9175499d6658\") " pod="openstack/root-account-create-update-2vccl" Feb 18 00:55:45 crc kubenswrapper[4791]: I0218 00:55:45.304616 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-2vccl" Feb 18 00:55:45 crc kubenswrapper[4791]: I0218 00:55:45.462766 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Feb 18 00:55:45 crc kubenswrapper[4791]: I0218 00:55:45.962061 4791 generic.go:334] "Generic (PLEG): container finished" podID="5b450ecd-05e1-453a-a4d5-953802f0a1cf" containerID="c467e7539b13f4791359d094ec74565a21bb791891381b702594f37899c6d3d5" exitCode=0 Feb 18 00:55:45 crc kubenswrapper[4791]: I0218 00:55:45.962118 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"5b450ecd-05e1-453a-a4d5-953802f0a1cf","Type":"ContainerDied","Data":"c467e7539b13f4791359d094ec74565a21bb791891381b702594f37899c6d3d5"} Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.260504 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-0"] Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.280949 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.307404 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/797178cc-4ac7-4338-88d7-b80656cd9566-config-data\") pod \"mysqld-exporter-0\" (UID: \"797178cc-4ac7-4338-88d7-b80656cd9566\") " pod="openstack/mysqld-exporter-0" Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.307474 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/797178cc-4ac7-4338-88d7-b80656cd9566-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"797178cc-4ac7-4338-88d7-b80656cd9566\") " pod="openstack/mysqld-exporter-0" Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.307511 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k62jd\" (UniqueName: \"kubernetes.io/projected/797178cc-4ac7-4338-88d7-b80656cd9566-kube-api-access-k62jd\") pod \"mysqld-exporter-0\" (UID: \"797178cc-4ac7-4338-88d7-b80656cd9566\") " pod="openstack/mysqld-exporter-0" Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.313087 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-config-data" Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.323938 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.345637 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-x8dzl" Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.345921 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-e552-account-create-update-lbd8g" Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.409172 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rjcr6\" (UniqueName: \"kubernetes.io/projected/f40402ed-56dc-452a-9fc4-46008591a6ab-kube-api-access-rjcr6\") pod \"f40402ed-56dc-452a-9fc4-46008591a6ab\" (UID: \"f40402ed-56dc-452a-9fc4-46008591a6ab\") " Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.409235 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9nm2v\" (UniqueName: \"kubernetes.io/projected/1568271d-f2b8-4f5a-abf8-cd9ea1e6750b-kube-api-access-9nm2v\") pod \"1568271d-f2b8-4f5a-abf8-cd9ea1e6750b\" (UID: \"1568271d-f2b8-4f5a-abf8-cd9ea1e6750b\") " Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.410829 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f40402ed-56dc-452a-9fc4-46008591a6ab-operator-scripts\") pod \"f40402ed-56dc-452a-9fc4-46008591a6ab\" (UID: \"f40402ed-56dc-452a-9fc4-46008591a6ab\") " Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.411391 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1568271d-f2b8-4f5a-abf8-cd9ea1e6750b-operator-scripts\") pod \"1568271d-f2b8-4f5a-abf8-cd9ea1e6750b\" (UID: \"1568271d-f2b8-4f5a-abf8-cd9ea1e6750b\") " Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.412018 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/797178cc-4ac7-4338-88d7-b80656cd9566-config-data\") pod \"mysqld-exporter-0\" (UID: \"797178cc-4ac7-4338-88d7-b80656cd9566\") " pod="openstack/mysqld-exporter-0" Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.412014 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f40402ed-56dc-452a-9fc4-46008591a6ab-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f40402ed-56dc-452a-9fc4-46008591a6ab" (UID: "f40402ed-56dc-452a-9fc4-46008591a6ab"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.412141 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/797178cc-4ac7-4338-88d7-b80656cd9566-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"797178cc-4ac7-4338-88d7-b80656cd9566\") " pod="openstack/mysqld-exporter-0" Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.412211 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k62jd\" (UniqueName: \"kubernetes.io/projected/797178cc-4ac7-4338-88d7-b80656cd9566-kube-api-access-k62jd\") pod \"mysqld-exporter-0\" (UID: \"797178cc-4ac7-4338-88d7-b80656cd9566\") " pod="openstack/mysqld-exporter-0" Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.412694 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1568271d-f2b8-4f5a-abf8-cd9ea1e6750b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1568271d-f2b8-4f5a-abf8-cd9ea1e6750b" (UID: "1568271d-f2b8-4f5a-abf8-cd9ea1e6750b"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.412841 4791 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1568271d-f2b8-4f5a-abf8-cd9ea1e6750b-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.412860 4791 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f40402ed-56dc-452a-9fc4-46008591a6ab-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.420763 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f40402ed-56dc-452a-9fc4-46008591a6ab-kube-api-access-rjcr6" (OuterVolumeSpecName: "kube-api-access-rjcr6") pod "f40402ed-56dc-452a-9fc4-46008591a6ab" (UID: "f40402ed-56dc-452a-9fc4-46008591a6ab"). InnerVolumeSpecName "kube-api-access-rjcr6". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.422267 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-5kfk7" Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.423222 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/797178cc-4ac7-4338-88d7-b80656cd9566-config-data\") pod \"mysqld-exporter-0\" (UID: \"797178cc-4ac7-4338-88d7-b80656cd9566\") " pod="openstack/mysqld-exporter-0" Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.423982 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bd9d-account-create-update-6nfrs" Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.424348 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1568271d-f2b8-4f5a-abf8-cd9ea1e6750b-kube-api-access-9nm2v" (OuterVolumeSpecName: "kube-api-access-9nm2v") pod "1568271d-f2b8-4f5a-abf8-cd9ea1e6750b" (UID: "1568271d-f2b8-4f5a-abf8-cd9ea1e6750b"). InnerVolumeSpecName "kube-api-access-9nm2v". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.424947 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/797178cc-4ac7-4338-88d7-b80656cd9566-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"797178cc-4ac7-4338-88d7-b80656cd9566\") " pod="openstack/mysqld-exporter-0" Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.443881 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k62jd\" (UniqueName: \"kubernetes.io/projected/797178cc-4ac7-4338-88d7-b80656cd9566-kube-api-access-k62jd\") pod \"mysqld-exporter-0\" (UID: \"797178cc-4ac7-4338-88d7-b80656cd9566\") " pod="openstack/mysqld-exporter-0" Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.520943 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rjcr6\" (UniqueName: \"kubernetes.io/projected/f40402ed-56dc-452a-9fc4-46008591a6ab-kube-api-access-rjcr6\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.520973 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9nm2v\" (UniqueName: \"kubernetes.io/projected/1568271d-f2b8-4f5a-abf8-cd9ea1e6750b-kube-api-access-9nm2v\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.570032 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.622045 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6pg4\" (UniqueName: \"kubernetes.io/projected/307ef073-8e53-4545-9da7-88c82a038dd0-kube-api-access-d6pg4\") pod \"307ef073-8e53-4545-9da7-88c82a038dd0\" (UID: \"307ef073-8e53-4545-9da7-88c82a038dd0\") " Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.622378 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j555m\" (UniqueName: \"kubernetes.io/projected/61227f77-9a0c-48f9-8dda-9fa75c00b71a-kube-api-access-j555m\") pod \"61227f77-9a0c-48f9-8dda-9fa75c00b71a\" (UID: \"61227f77-9a0c-48f9-8dda-9fa75c00b71a\") " Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.622528 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/61227f77-9a0c-48f9-8dda-9fa75c00b71a-operator-scripts\") pod \"61227f77-9a0c-48f9-8dda-9fa75c00b71a\" (UID: \"61227f77-9a0c-48f9-8dda-9fa75c00b71a\") " Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.622668 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/307ef073-8e53-4545-9da7-88c82a038dd0-operator-scripts\") pod \"307ef073-8e53-4545-9da7-88c82a038dd0\" (UID: \"307ef073-8e53-4545-9da7-88c82a038dd0\") " Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.624318 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/307ef073-8e53-4545-9da7-88c82a038dd0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "307ef073-8e53-4545-9da7-88c82a038dd0" (UID: "307ef073-8e53-4545-9da7-88c82a038dd0"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.624731 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61227f77-9a0c-48f9-8dda-9fa75c00b71a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "61227f77-9a0c-48f9-8dda-9fa75c00b71a" (UID: "61227f77-9a0c-48f9-8dda-9fa75c00b71a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.630108 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/61227f77-9a0c-48f9-8dda-9fa75c00b71a-kube-api-access-j555m" (OuterVolumeSpecName: "kube-api-access-j555m") pod "61227f77-9a0c-48f9-8dda-9fa75c00b71a" (UID: "61227f77-9a0c-48f9-8dda-9fa75c00b71a"). InnerVolumeSpecName "kube-api-access-j555m". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.630397 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/307ef073-8e53-4545-9da7-88c82a038dd0-kube-api-access-d6pg4" (OuterVolumeSpecName: "kube-api-access-d6pg4") pod "307ef073-8e53-4545-9da7-88c82a038dd0" (UID: "307ef073-8e53-4545-9da7-88c82a038dd0"). InnerVolumeSpecName "kube-api-access-d6pg4". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.725069 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6pg4\" (UniqueName: \"kubernetes.io/projected/307ef073-8e53-4545-9da7-88c82a038dd0-kube-api-access-d6pg4\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.725093 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j555m\" (UniqueName: \"kubernetes.io/projected/61227f77-9a0c-48f9-8dda-9fa75c00b71a-kube-api-access-j555m\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.725102 4791 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/61227f77-9a0c-48f9-8dda-9fa75c00b71a-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.725110 4791 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/307ef073-8e53-4545-9da7-88c82a038dd0-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.899804 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-2vccl"] Feb 18 00:55:46 crc kubenswrapper[4791]: W0218 00:55:46.907786 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod01db7dd2_a23a_4ba0_8355_9175499d6658.slice/crio-0aec0f90c2ebd04b8fba65b7416ce7f53bc62eea2c4b189117b246fef93d789e WatchSource:0}: Error finding container 0aec0f90c2ebd04b8fba65b7416ce7f53bc62eea2c4b189117b246fef93d789e: Status 404 returned error can't find the container with id 0aec0f90c2ebd04b8fba65b7416ce7f53bc62eea2c4b189117b246fef93d789e Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.983118 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" 
event={"ID":"1c3406dc-1b5d-4376-8b80-b55720c15091","Type":"ContainerStarted","Data":"dab91d4e5233a769a0bcb3480c1216b8f00d5ed454db656fad040cef4bc4c5df"} Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.984892 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-x8dzl" event={"ID":"f40402ed-56dc-452a-9fc4-46008591a6ab","Type":"ContainerDied","Data":"30f8776711cafc7eebf0f6f363c5b86b59cb8de359dfeb3f7e54b78f3ab5675f"} Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.984921 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="30f8776711cafc7eebf0f6f363c5b86b59cb8de359dfeb3f7e54b78f3ab5675f" Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.984984 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-x8dzl" Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.993730 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-2vccl" event={"ID":"01db7dd2-a23a-4ba0-8355-9175499d6658","Type":"ContainerStarted","Data":"0aec0f90c2ebd04b8fba65b7416ce7f53bc62eea2c4b189117b246fef93d789e"} Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.995780 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-5kfk7" event={"ID":"307ef073-8e53-4545-9da7-88c82a038dd0","Type":"ContainerDied","Data":"7619e5e0e0ecc01cd47d792e2435244e75ba175f3d1dd28a532d1c2bd9ba3212"} Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.995820 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7619e5e0e0ecc01cd47d792e2435244e75ba175f3d1dd28a532d1c2bd9ba3212" Feb 18 00:55:46 crc kubenswrapper[4791]: I0218 00:55:46.995890 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-5kfk7" Feb 18 00:55:47 crc kubenswrapper[4791]: I0218 00:55:47.005200 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bd9d-account-create-update-6nfrs" event={"ID":"61227f77-9a0c-48f9-8dda-9fa75c00b71a","Type":"ContainerDied","Data":"75ec725ea565e4f3c0781338505f0567c99fafbe9824cdc983e63f3cc394ba37"} Feb 18 00:55:47 crc kubenswrapper[4791]: I0218 00:55:47.005925 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="75ec725ea565e4f3c0781338505f0567c99fafbe9824cdc983e63f3cc394ba37" Feb 18 00:55:47 crc kubenswrapper[4791]: I0218 00:55:47.005292 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bd9d-account-create-update-6nfrs" Feb 18 00:55:47 crc kubenswrapper[4791]: I0218 00:55:47.011875 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"5b450ecd-05e1-453a-a4d5-953802f0a1cf","Type":"ContainerStarted","Data":"5bd1c5f46865028441e3b970c5220006bbd97f4d84c9ca8ecd4b5cbfd164dee5"} Feb 18 00:55:47 crc kubenswrapper[4791]: I0218 00:55:47.012946 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-2" Feb 18 00:55:47 crc kubenswrapper[4791]: I0218 00:55:47.017913 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-e552-account-create-update-lbd8g" event={"ID":"1568271d-f2b8-4f5a-abf8-cd9ea1e6750b","Type":"ContainerDied","Data":"7479cb92fde269612c90de6088231122548dd05e88f73b24fc2c5314dd9047d6"} Feb 18 00:55:47 crc kubenswrapper[4791]: I0218 00:55:47.017950 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7479cb92fde269612c90de6088231122548dd05e88f73b24fc2c5314dd9047d6" Feb 18 00:55:47 crc kubenswrapper[4791]: I0218 00:55:47.018007 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-e552-account-create-update-lbd8g" Feb 18 00:55:47 crc kubenswrapper[4791]: I0218 00:55:47.070850 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-2" podStartSLOduration=64.070829326 podStartE2EDuration="1m4.070829326s" podCreationTimestamp="2026-02-18 00:54:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:55:47.054296322 +0000 UTC m=+1288.622309492" watchObservedRunningTime="2026-02-18 00:55:47.070829326 +0000 UTC m=+1288.638842506" Feb 18 00:55:47 crc kubenswrapper[4791]: I0218 00:55:47.104777 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Feb 18 00:55:48 crc kubenswrapper[4791]: I0218 00:55:48.043870 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"569159b6-791b-428c-84c7-5387c17a731b","Type":"ContainerStarted","Data":"a774a9336e5a0afcaa3492a3033d8b41fa73b6d893b5e49bcd2e2d245ed70a9c"} Feb 18 00:55:48 crc kubenswrapper[4791]: I0218 00:55:48.057058 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"797178cc-4ac7-4338-88d7-b80656cd9566","Type":"ContainerStarted","Data":"eb0c8cd7e6f8d03a2d0f5e52cef8f62a5846398fdf5b2058114ae0fbe5103ab9"} Feb 18 00:55:48 crc kubenswrapper[4791]: I0218 00:55:48.063767 4791 generic.go:334] "Generic (PLEG): container finished" podID="01db7dd2-a23a-4ba0-8355-9175499d6658" containerID="d28a2829a5f749af05c7386c584f40092b30f5afe8fb16c9c4f6c4e75db990c8" exitCode=0 Feb 18 00:55:48 crc kubenswrapper[4791]: I0218 00:55:48.065132 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-2vccl" event={"ID":"01db7dd2-a23a-4ba0-8355-9175499d6658","Type":"ContainerDied","Data":"d28a2829a5f749af05c7386c584f40092b30f5afe8fb16c9c4f6c4e75db990c8"} Feb 18 00:55:48 crc kubenswrapper[4791]: I0218 00:55:48.091528 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-5shlq" podUID="54deb01e-caa1-4fe0-8bd0-d412c4d73210" containerName="ovn-controller" probeResult="failure" output=< Feb 18 00:55:48 crc kubenswrapper[4791]: ERROR - ovn-controller connection status is 'not 
connected', expecting 'connected' status Feb 18 00:55:48 crc kubenswrapper[4791]: > Feb 18 00:55:48 crc kubenswrapper[4791]: I0218 00:55:48.129651 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-l4g2x" Feb 18 00:55:48 crc kubenswrapper[4791]: I0218 00:55:48.334958 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-wx5vg"] Feb 18 00:55:48 crc kubenswrapper[4791]: E0218 00:55:48.335354 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="307ef073-8e53-4545-9da7-88c82a038dd0" containerName="mariadb-database-create" Feb 18 00:55:48 crc kubenswrapper[4791]: I0218 00:55:48.335371 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="307ef073-8e53-4545-9da7-88c82a038dd0" containerName="mariadb-database-create" Feb 18 00:55:48 crc kubenswrapper[4791]: E0218 00:55:48.335400 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1568271d-f2b8-4f5a-abf8-cd9ea1e6750b" containerName="mariadb-account-create-update" Feb 18 00:55:48 crc kubenswrapper[4791]: I0218 00:55:48.335407 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="1568271d-f2b8-4f5a-abf8-cd9ea1e6750b" containerName="mariadb-account-create-update" Feb 18 00:55:48 crc kubenswrapper[4791]: E0218 00:55:48.335419 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f40402ed-56dc-452a-9fc4-46008591a6ab" containerName="mariadb-database-create" Feb 18 00:55:48 crc kubenswrapper[4791]: I0218 00:55:48.335425 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="f40402ed-56dc-452a-9fc4-46008591a6ab" containerName="mariadb-database-create" Feb 18 00:55:48 crc kubenswrapper[4791]: E0218 00:55:48.335438 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61227f77-9a0c-48f9-8dda-9fa75c00b71a" containerName="mariadb-account-create-update" Feb 18 00:55:48 crc kubenswrapper[4791]: I0218 00:55:48.335444 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="61227f77-9a0c-48f9-8dda-9fa75c00b71a" containerName="mariadb-account-create-update" Feb 18 00:55:48 crc kubenswrapper[4791]: I0218 00:55:48.335626 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="61227f77-9a0c-48f9-8dda-9fa75c00b71a" containerName="mariadb-account-create-update" Feb 18 00:55:48 crc kubenswrapper[4791]: I0218 00:55:48.335641 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="f40402ed-56dc-452a-9fc4-46008591a6ab" containerName="mariadb-database-create" Feb 18 00:55:48 crc kubenswrapper[4791]: I0218 00:55:48.335664 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="1568271d-f2b8-4f5a-abf8-cd9ea1e6750b" containerName="mariadb-account-create-update" Feb 18 00:55:48 crc kubenswrapper[4791]: I0218 00:55:48.335674 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="307ef073-8e53-4545-9da7-88c82a038dd0" containerName="mariadb-database-create" Feb 18 00:55:48 crc kubenswrapper[4791]: I0218 00:55:48.345473 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-wx5vg" Feb 18 00:55:48 crc kubenswrapper[4791]: I0218 00:55:48.363373 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-5r5zk" Feb 18 00:55:48 crc kubenswrapper[4791]: I0218 00:55:48.363967 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Feb 18 00:55:48 crc kubenswrapper[4791]: I0218 00:55:48.374322 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-wx5vg"] Feb 18 00:55:48 crc kubenswrapper[4791]: I0218 00:55:48.465895 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e48266b0-04c5-4da5-994e-516ef8e36299-combined-ca-bundle\") pod \"glance-db-sync-wx5vg\" (UID: \"e48266b0-04c5-4da5-994e-516ef8e36299\") " pod="openstack/glance-db-sync-wx5vg" Feb 18 00:55:48 crc kubenswrapper[4791]: I0218 00:55:48.466108 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e48266b0-04c5-4da5-994e-516ef8e36299-config-data\") pod \"glance-db-sync-wx5vg\" (UID: \"e48266b0-04c5-4da5-994e-516ef8e36299\") " pod="openstack/glance-db-sync-wx5vg" Feb 18 00:55:48 crc kubenswrapper[4791]: I0218 00:55:48.466188 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f4fk4\" (UniqueName: \"kubernetes.io/projected/e48266b0-04c5-4da5-994e-516ef8e36299-kube-api-access-f4fk4\") pod \"glance-db-sync-wx5vg\" (UID: \"e48266b0-04c5-4da5-994e-516ef8e36299\") " pod="openstack/glance-db-sync-wx5vg" Feb 18 00:55:48 crc kubenswrapper[4791]: I0218 00:55:48.466324 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e48266b0-04c5-4da5-994e-516ef8e36299-db-sync-config-data\") pod \"glance-db-sync-wx5vg\" (UID: \"e48266b0-04c5-4da5-994e-516ef8e36299\") " pod="openstack/glance-db-sync-wx5vg" Feb 18 00:55:48 crc kubenswrapper[4791]: I0218 00:55:48.568147 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e48266b0-04c5-4da5-994e-516ef8e36299-db-sync-config-data\") pod \"glance-db-sync-wx5vg\" (UID: \"e48266b0-04c5-4da5-994e-516ef8e36299\") " pod="openstack/glance-db-sync-wx5vg" Feb 18 00:55:48 crc kubenswrapper[4791]: I0218 00:55:48.568559 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e48266b0-04c5-4da5-994e-516ef8e36299-combined-ca-bundle\") pod \"glance-db-sync-wx5vg\" (UID: \"e48266b0-04c5-4da5-994e-516ef8e36299\") " pod="openstack/glance-db-sync-wx5vg" Feb 18 00:55:48 crc kubenswrapper[4791]: I0218 00:55:48.568719 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e48266b0-04c5-4da5-994e-516ef8e36299-config-data\") pod \"glance-db-sync-wx5vg\" (UID: \"e48266b0-04c5-4da5-994e-516ef8e36299\") " pod="openstack/glance-db-sync-wx5vg" Feb 18 00:55:48 crc kubenswrapper[4791]: I0218 00:55:48.568745 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f4fk4\" (UniqueName: \"kubernetes.io/projected/e48266b0-04c5-4da5-994e-516ef8e36299-kube-api-access-f4fk4\") pod 
\"glance-db-sync-wx5vg\" (UID: \"e48266b0-04c5-4da5-994e-516ef8e36299\") " pod="openstack/glance-db-sync-wx5vg" Feb 18 00:55:48 crc kubenswrapper[4791]: I0218 00:55:48.574711 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e48266b0-04c5-4da5-994e-516ef8e36299-combined-ca-bundle\") pod \"glance-db-sync-wx5vg\" (UID: \"e48266b0-04c5-4da5-994e-516ef8e36299\") " pod="openstack/glance-db-sync-wx5vg" Feb 18 00:55:48 crc kubenswrapper[4791]: I0218 00:55:48.574946 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e48266b0-04c5-4da5-994e-516ef8e36299-config-data\") pod \"glance-db-sync-wx5vg\" (UID: \"e48266b0-04c5-4da5-994e-516ef8e36299\") " pod="openstack/glance-db-sync-wx5vg" Feb 18 00:55:48 crc kubenswrapper[4791]: I0218 00:55:48.575917 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e48266b0-04c5-4da5-994e-516ef8e36299-db-sync-config-data\") pod \"glance-db-sync-wx5vg\" (UID: \"e48266b0-04c5-4da5-994e-516ef8e36299\") " pod="openstack/glance-db-sync-wx5vg" Feb 18 00:55:48 crc kubenswrapper[4791]: I0218 00:55:48.592712 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f4fk4\" (UniqueName: \"kubernetes.io/projected/e48266b0-04c5-4da5-994e-516ef8e36299-kube-api-access-f4fk4\") pod \"glance-db-sync-wx5vg\" (UID: \"e48266b0-04c5-4da5-994e-516ef8e36299\") " pod="openstack/glance-db-sync-wx5vg" Feb 18 00:55:48 crc kubenswrapper[4791]: I0218 00:55:48.675482 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-wx5vg" Feb 18 00:55:49 crc kubenswrapper[4791]: I0218 00:55:49.076684 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"569159b6-791b-428c-84c7-5387c17a731b","Type":"ContainerStarted","Data":"732cdaa7b624d408b9f82e239ef1c485a455f1df4dbfff1e053443c8b1dcf47e"} Feb 18 00:55:50 crc kubenswrapper[4791]: I0218 00:55:50.015219 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-2vccl" Feb 18 00:55:50 crc kubenswrapper[4791]: I0218 00:55:50.097725 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-2vccl" Feb 18 00:55:50 crc kubenswrapper[4791]: I0218 00:55:50.098433 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/01db7dd2-a23a-4ba0-8355-9175499d6658-operator-scripts\") pod \"01db7dd2-a23a-4ba0-8355-9175499d6658\" (UID: \"01db7dd2-a23a-4ba0-8355-9175499d6658\") " Feb 18 00:55:50 crc kubenswrapper[4791]: I0218 00:55:50.098472 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5nvg4\" (UniqueName: \"kubernetes.io/projected/01db7dd2-a23a-4ba0-8355-9175499d6658-kube-api-access-5nvg4\") pod \"01db7dd2-a23a-4ba0-8355-9175499d6658\" (UID: \"01db7dd2-a23a-4ba0-8355-9175499d6658\") " Feb 18 00:55:50 crc kubenswrapper[4791]: I0218 00:55:50.098528 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-2vccl" event={"ID":"01db7dd2-a23a-4ba0-8355-9175499d6658","Type":"ContainerDied","Data":"0aec0f90c2ebd04b8fba65b7416ce7f53bc62eea2c4b189117b246fef93d789e"} Feb 18 00:55:50 crc kubenswrapper[4791]: I0218 00:55:50.098556 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0aec0f90c2ebd04b8fba65b7416ce7f53bc62eea2c4b189117b246fef93d789e" Feb 18 00:55:50 crc kubenswrapper[4791]: I0218 00:55:50.100501 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01db7dd2-a23a-4ba0-8355-9175499d6658-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "01db7dd2-a23a-4ba0-8355-9175499d6658" (UID: "01db7dd2-a23a-4ba0-8355-9175499d6658"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:55:50 crc kubenswrapper[4791]: I0218 00:55:50.102809 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"1c3406dc-1b5d-4376-8b80-b55720c15091","Type":"ContainerStarted","Data":"804a33d2882f7f64057b30535001a5e4734a265390320e6c2f4ec6777045afde"} Feb 18 00:55:50 crc kubenswrapper[4791]: I0218 00:55:50.106784 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01db7dd2-a23a-4ba0-8355-9175499d6658-kube-api-access-5nvg4" (OuterVolumeSpecName: "kube-api-access-5nvg4") pod "01db7dd2-a23a-4ba0-8355-9175499d6658" (UID: "01db7dd2-a23a-4ba0-8355-9175499d6658"). InnerVolumeSpecName "kube-api-access-5nvg4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:55:50 crc kubenswrapper[4791]: I0218 00:55:50.201562 4791 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/01db7dd2-a23a-4ba0-8355-9175499d6658-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:50 crc kubenswrapper[4791]: I0218 00:55:50.201606 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5nvg4\" (UniqueName: \"kubernetes.io/projected/01db7dd2-a23a-4ba0-8355-9175499d6658-kube-api-access-5nvg4\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:50 crc kubenswrapper[4791]: I0218 00:55:50.494306 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-wx5vg"] Feb 18 00:55:51 crc kubenswrapper[4791]: I0218 00:55:51.117090 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"569159b6-791b-428c-84c7-5387c17a731b","Type":"ContainerStarted","Data":"680142cbf5c561a7a7c22ef37622116640f2af5c6621b50c542977736c989a18"} Feb 18 00:55:51 crc kubenswrapper[4791]: I0218 00:55:51.117131 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"569159b6-791b-428c-84c7-5387c17a731b","Type":"ContainerStarted","Data":"b4bd4c5b2666dcd480c40c68f8e3e7fc368e920d12968cdd2fa13cdd6fb775ad"} Feb 18 00:55:51 crc kubenswrapper[4791]: I0218 00:55:51.119069 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"797178cc-4ac7-4338-88d7-b80656cd9566","Type":"ContainerStarted","Data":"2b91226754d78ba69b8f9a7179313ca25dfd26860d09756a8959ba659a128373"} Feb 18 00:55:51 crc kubenswrapper[4791]: I0218 00:55:51.120816 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-wx5vg" event={"ID":"e48266b0-04c5-4da5-994e-516ef8e36299","Type":"ContainerStarted","Data":"297d88e0754c0d95fd8511353f36084ef2576c3c07b4726ddab6e84af660d35b"} Feb 18 00:55:51 crc kubenswrapper[4791]: I0218 00:55:51.134921 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-0" podStartSLOduration=2.693360537 podStartE2EDuration="5.134907183s" podCreationTimestamp="2026-02-18 00:55:46 +0000 UTC" firstStartedPulling="2026-02-18 00:55:47.327782387 +0000 UTC m=+1288.895795557" lastFinishedPulling="2026-02-18 00:55:49.769329033 +0000 UTC m=+1291.337342203" observedRunningTime="2026-02-18 00:55:51.134482769 +0000 UTC m=+1292.702495939" watchObservedRunningTime="2026-02-18 00:55:51.134907183 +0000 UTC m=+1292.702920353" Feb 18 00:55:51 crc kubenswrapper[4791]: I0218 00:55:51.216307 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-2vccl"] Feb 18 00:55:51 crc kubenswrapper[4791]: I0218 00:55:51.254279 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-2vccl"] Feb 18 00:55:53 crc kubenswrapper[4791]: I0218 00:55:53.050051 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-5shlq" podUID="54deb01e-caa1-4fe0-8bd0-d412c4d73210" containerName="ovn-controller" probeResult="failure" output=< Feb 18 00:55:53 crc kubenswrapper[4791]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Feb 18 00:55:53 crc kubenswrapper[4791]: > Feb 18 00:55:53 crc kubenswrapper[4791]: I0218 00:55:53.075296 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01db7dd2-a23a-4ba0-8355-9175499d6658" 
path="/var/lib/kubelet/pods/01db7dd2-a23a-4ba0-8355-9175499d6658/volumes" Feb 18 00:55:53 crc kubenswrapper[4791]: I0218 00:55:53.163530 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"1c3406dc-1b5d-4376-8b80-b55720c15091","Type":"ContainerStarted","Data":"56f1bdcf65bc44f2090c0827ec52a299f6410d974706f5259f3a57cb62968094"} Feb 18 00:55:53 crc kubenswrapper[4791]: I0218 00:55:53.174037 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"569159b6-791b-428c-84c7-5387c17a731b","Type":"ContainerStarted","Data":"5a3310d036696aa493d8a1a1c9bae502eb5d0d2879c9e437af45cf212b9abf89"} Feb 18 00:55:53 crc kubenswrapper[4791]: I0218 00:55:53.178861 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-l4g2x" Feb 18 00:55:53 crc kubenswrapper[4791]: I0218 00:55:53.193367 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=14.582989189 podStartE2EDuration="1m3.193351666s" podCreationTimestamp="2026-02-18 00:54:50 +0000 UTC" firstStartedPulling="2026-02-18 00:55:04.096048468 +0000 UTC m=+1245.664061638" lastFinishedPulling="2026-02-18 00:55:52.706410945 +0000 UTC m=+1294.274424115" observedRunningTime="2026-02-18 00:55:53.186468832 +0000 UTC m=+1294.754482012" watchObservedRunningTime="2026-02-18 00:55:53.193351666 +0000 UTC m=+1294.761364836" Feb 18 00:55:53 crc kubenswrapper[4791]: I0218 00:55:53.391715 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-5shlq-config-z58pz"] Feb 18 00:55:53 crc kubenswrapper[4791]: E0218 00:55:53.392278 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01db7dd2-a23a-4ba0-8355-9175499d6658" containerName="mariadb-account-create-update" Feb 18 00:55:53 crc kubenswrapper[4791]: I0218 00:55:53.392296 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="01db7dd2-a23a-4ba0-8355-9175499d6658" containerName="mariadb-account-create-update" Feb 18 00:55:53 crc kubenswrapper[4791]: I0218 00:55:53.392521 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="01db7dd2-a23a-4ba0-8355-9175499d6658" containerName="mariadb-account-create-update" Feb 18 00:55:53 crc kubenswrapper[4791]: I0218 00:55:53.393228 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-5shlq-config-z58pz" Feb 18 00:55:53 crc kubenswrapper[4791]: I0218 00:55:53.396269 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Feb 18 00:55:53 crc kubenswrapper[4791]: I0218 00:55:53.405065 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-5shlq-config-z58pz"] Feb 18 00:55:53 crc kubenswrapper[4791]: I0218 00:55:53.474316 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/52094b3f-9dc5-422b-a44d-bd05f21be161-var-log-ovn\") pod \"ovn-controller-5shlq-config-z58pz\" (UID: \"52094b3f-9dc5-422b-a44d-bd05f21be161\") " pod="openstack/ovn-controller-5shlq-config-z58pz" Feb 18 00:55:53 crc kubenswrapper[4791]: I0218 00:55:53.474402 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/52094b3f-9dc5-422b-a44d-bd05f21be161-additional-scripts\") pod \"ovn-controller-5shlq-config-z58pz\" (UID: \"52094b3f-9dc5-422b-a44d-bd05f21be161\") " pod="openstack/ovn-controller-5shlq-config-z58pz" Feb 18 00:55:53 crc kubenswrapper[4791]: I0218 00:55:53.474571 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/52094b3f-9dc5-422b-a44d-bd05f21be161-scripts\") pod \"ovn-controller-5shlq-config-z58pz\" (UID: \"52094b3f-9dc5-422b-a44d-bd05f21be161\") " pod="openstack/ovn-controller-5shlq-config-z58pz" Feb 18 00:55:53 crc kubenswrapper[4791]: I0218 00:55:53.474728 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nfgld\" (UniqueName: \"kubernetes.io/projected/52094b3f-9dc5-422b-a44d-bd05f21be161-kube-api-access-nfgld\") pod \"ovn-controller-5shlq-config-z58pz\" (UID: \"52094b3f-9dc5-422b-a44d-bd05f21be161\") " pod="openstack/ovn-controller-5shlq-config-z58pz" Feb 18 00:55:53 crc kubenswrapper[4791]: I0218 00:55:53.474886 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/52094b3f-9dc5-422b-a44d-bd05f21be161-var-run\") pod \"ovn-controller-5shlq-config-z58pz\" (UID: \"52094b3f-9dc5-422b-a44d-bd05f21be161\") " pod="openstack/ovn-controller-5shlq-config-z58pz" Feb 18 00:55:53 crc kubenswrapper[4791]: I0218 00:55:53.474941 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/52094b3f-9dc5-422b-a44d-bd05f21be161-var-run-ovn\") pod \"ovn-controller-5shlq-config-z58pz\" (UID: \"52094b3f-9dc5-422b-a44d-bd05f21be161\") " pod="openstack/ovn-controller-5shlq-config-z58pz" Feb 18 00:55:53 crc kubenswrapper[4791]: I0218 00:55:53.577577 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/52094b3f-9dc5-422b-a44d-bd05f21be161-additional-scripts\") pod \"ovn-controller-5shlq-config-z58pz\" (UID: \"52094b3f-9dc5-422b-a44d-bd05f21be161\") " pod="openstack/ovn-controller-5shlq-config-z58pz" Feb 18 00:55:53 crc kubenswrapper[4791]: I0218 00:55:53.577655 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/52094b3f-9dc5-422b-a44d-bd05f21be161-scripts\") pod \"ovn-controller-5shlq-config-z58pz\" (UID: \"52094b3f-9dc5-422b-a44d-bd05f21be161\") " pod="openstack/ovn-controller-5shlq-config-z58pz" Feb 18 00:55:53 crc kubenswrapper[4791]: I0218 00:55:53.577703 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nfgld\" (UniqueName: \"kubernetes.io/projected/52094b3f-9dc5-422b-a44d-bd05f21be161-kube-api-access-nfgld\") pod \"ovn-controller-5shlq-config-z58pz\" (UID: \"52094b3f-9dc5-422b-a44d-bd05f21be161\") " pod="openstack/ovn-controller-5shlq-config-z58pz" Feb 18 00:55:53 crc kubenswrapper[4791]: I0218 00:55:53.577761 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/52094b3f-9dc5-422b-a44d-bd05f21be161-var-run\") pod \"ovn-controller-5shlq-config-z58pz\" (UID: \"52094b3f-9dc5-422b-a44d-bd05f21be161\") " pod="openstack/ovn-controller-5shlq-config-z58pz" Feb 18 00:55:53 crc kubenswrapper[4791]: I0218 00:55:53.577791 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/52094b3f-9dc5-422b-a44d-bd05f21be161-var-run-ovn\") pod \"ovn-controller-5shlq-config-z58pz\" (UID: \"52094b3f-9dc5-422b-a44d-bd05f21be161\") " pod="openstack/ovn-controller-5shlq-config-z58pz" Feb 18 00:55:53 crc kubenswrapper[4791]: I0218 00:55:53.577911 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/52094b3f-9dc5-422b-a44d-bd05f21be161-var-log-ovn\") pod \"ovn-controller-5shlq-config-z58pz\" (UID: \"52094b3f-9dc5-422b-a44d-bd05f21be161\") " pod="openstack/ovn-controller-5shlq-config-z58pz" Feb 18 00:55:53 crc kubenswrapper[4791]: I0218 00:55:53.578274 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/52094b3f-9dc5-422b-a44d-bd05f21be161-var-log-ovn\") pod \"ovn-controller-5shlq-config-z58pz\" (UID: \"52094b3f-9dc5-422b-a44d-bd05f21be161\") " pod="openstack/ovn-controller-5shlq-config-z58pz" Feb 18 00:55:53 crc kubenswrapper[4791]: I0218 00:55:53.578342 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/52094b3f-9dc5-422b-a44d-bd05f21be161-var-run-ovn\") pod \"ovn-controller-5shlq-config-z58pz\" (UID: \"52094b3f-9dc5-422b-a44d-bd05f21be161\") " pod="openstack/ovn-controller-5shlq-config-z58pz" Feb 18 00:55:53 crc kubenswrapper[4791]: I0218 00:55:53.578374 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/52094b3f-9dc5-422b-a44d-bd05f21be161-var-run\") pod \"ovn-controller-5shlq-config-z58pz\" (UID: \"52094b3f-9dc5-422b-a44d-bd05f21be161\") " pod="openstack/ovn-controller-5shlq-config-z58pz" Feb 18 00:55:53 crc kubenswrapper[4791]: I0218 00:55:53.578824 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/52094b3f-9dc5-422b-a44d-bd05f21be161-additional-scripts\") pod \"ovn-controller-5shlq-config-z58pz\" (UID: \"52094b3f-9dc5-422b-a44d-bd05f21be161\") " pod="openstack/ovn-controller-5shlq-config-z58pz" Feb 18 00:55:53 crc kubenswrapper[4791]: I0218 00:55:53.580032 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/52094b3f-9dc5-422b-a44d-bd05f21be161-scripts\") pod \"ovn-controller-5shlq-config-z58pz\" (UID: \"52094b3f-9dc5-422b-a44d-bd05f21be161\") " pod="openstack/ovn-controller-5shlq-config-z58pz" Feb 18 00:55:53 crc kubenswrapper[4791]: I0218 00:55:53.595688 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nfgld\" (UniqueName: \"kubernetes.io/projected/52094b3f-9dc5-422b-a44d-bd05f21be161-kube-api-access-nfgld\") pod \"ovn-controller-5shlq-config-z58pz\" (UID: \"52094b3f-9dc5-422b-a44d-bd05f21be161\") " pod="openstack/ovn-controller-5shlq-config-z58pz" Feb 18 00:55:53 crc kubenswrapper[4791]: I0218 00:55:53.716894 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-5shlq-config-z58pz" Feb 18 00:55:54 crc kubenswrapper[4791]: I0218 00:55:54.188308 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"569159b6-791b-428c-84c7-5387c17a731b","Type":"ContainerStarted","Data":"a4f3ee04f1f0b745500106530ca237fe4236aa7bb736dfb7ab8cc5b6d925a081"} Feb 18 00:55:54 crc kubenswrapper[4791]: I0218 00:55:54.188531 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"569159b6-791b-428c-84c7-5387c17a731b","Type":"ContainerStarted","Data":"65ad09abcb9b7c907f582f6a7a90cae4c33326746d125e20c34803d109eaf9bc"} Feb 18 00:55:54 crc kubenswrapper[4791]: I0218 00:55:54.282836 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-5shlq-config-z58pz"] Feb 18 00:55:54 crc kubenswrapper[4791]: I0218 00:55:54.774802 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="10fd526e-f41c-4c4a-8e15-239cd3ac37da" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.129:5671: connect: connection refused" Feb 18 00:55:54 crc kubenswrapper[4791]: I0218 00:55:54.969403 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-xkz8j"] Feb 18 00:55:54 crc kubenswrapper[4791]: I0218 00:55:54.970814 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-xkz8j" Feb 18 00:55:54 crc kubenswrapper[4791]: I0218 00:55:54.974053 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Feb 18 00:55:54 crc kubenswrapper[4791]: I0218 00:55:54.981352 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-xkz8j"] Feb 18 00:55:55 crc kubenswrapper[4791]: I0218 00:55:55.009566 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9fc69d93-6595-4c89-aca5-052479e976f0-operator-scripts\") pod \"root-account-create-update-xkz8j\" (UID: \"9fc69d93-6595-4c89-aca5-052479e976f0\") " pod="openstack/root-account-create-update-xkz8j" Feb 18 00:55:55 crc kubenswrapper[4791]: I0218 00:55:55.009667 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-29b28\" (UniqueName: \"kubernetes.io/projected/9fc69d93-6595-4c89-aca5-052479e976f0-kube-api-access-29b28\") pod \"root-account-create-update-xkz8j\" (UID: \"9fc69d93-6595-4c89-aca5-052479e976f0\") " pod="openstack/root-account-create-update-xkz8j" Feb 18 00:55:55 crc kubenswrapper[4791]: I0218 00:55:55.107324 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-1" Feb 18 00:55:55 crc kubenswrapper[4791]: I0218 00:55:55.111658 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9fc69d93-6595-4c89-aca5-052479e976f0-operator-scripts\") pod \"root-account-create-update-xkz8j\" (UID: \"9fc69d93-6595-4c89-aca5-052479e976f0\") " pod="openstack/root-account-create-update-xkz8j" Feb 18 00:55:55 crc kubenswrapper[4791]: I0218 00:55:55.111746 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-29b28\" (UniqueName: \"kubernetes.io/projected/9fc69d93-6595-4c89-aca5-052479e976f0-kube-api-access-29b28\") pod \"root-account-create-update-xkz8j\" (UID: \"9fc69d93-6595-4c89-aca5-052479e976f0\") " pod="openstack/root-account-create-update-xkz8j" Feb 18 00:55:55 crc kubenswrapper[4791]: I0218 00:55:55.112523 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9fc69d93-6595-4c89-aca5-052479e976f0-operator-scripts\") pod \"root-account-create-update-xkz8j\" (UID: \"9fc69d93-6595-4c89-aca5-052479e976f0\") " pod="openstack/root-account-create-update-xkz8j" Feb 18 00:55:55 crc kubenswrapper[4791]: I0218 00:55:55.139264 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-29b28\" (UniqueName: \"kubernetes.io/projected/9fc69d93-6595-4c89-aca5-052479e976f0-kube-api-access-29b28\") pod \"root-account-create-update-xkz8j\" (UID: \"9fc69d93-6595-4c89-aca5-052479e976f0\") " pod="openstack/root-account-create-update-xkz8j" Feb 18 00:55:55 crc kubenswrapper[4791]: I0218 00:55:55.157617 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-xkz8j" Feb 18 00:55:55 crc kubenswrapper[4791]: I0218 00:55:55.214396 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"569159b6-791b-428c-84c7-5387c17a731b","Type":"ContainerStarted","Data":"789a6825184b34c04c25bf6b09ad74c5b58ce7bdb7c498526e5ba54b81185c73"} Feb 18 00:55:55 crc kubenswrapper[4791]: I0218 00:55:55.214468 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"569159b6-791b-428c-84c7-5387c17a731b","Type":"ContainerStarted","Data":"a11ba46863138aa59df8fad4b14acc64ae94d6226138ed4df277551c3748f5a8"} Feb 18 00:55:55 crc kubenswrapper[4791]: I0218 00:55:55.224363 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Feb 18 00:55:55 crc kubenswrapper[4791]: I0218 00:55:55.227498 4791 generic.go:334] "Generic (PLEG): container finished" podID="52094b3f-9dc5-422b-a44d-bd05f21be161" containerID="61d1139739e53f052f116d30768f8dd16ba921eb312a90b1b4b3c0a05f784bd1" exitCode=0 Feb 18 00:55:55 crc kubenswrapper[4791]: I0218 00:55:55.227553 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-5shlq-config-z58pz" event={"ID":"52094b3f-9dc5-422b-a44d-bd05f21be161","Type":"ContainerDied","Data":"61d1139739e53f052f116d30768f8dd16ba921eb312a90b1b4b3c0a05f784bd1"} Feb 18 00:55:55 crc kubenswrapper[4791]: I0218 00:55:55.227577 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-5shlq-config-z58pz" event={"ID":"52094b3f-9dc5-422b-a44d-bd05f21be161","Type":"ContainerStarted","Data":"e84df90b1135f676606903587a35cf5acdbb734b7e001b9903df4b23e968f339"} Feb 18 00:55:55 crc kubenswrapper[4791]: I0218 00:55:55.767523 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-xkz8j"] Feb 18 00:55:55 crc kubenswrapper[4791]: W0218 00:55:55.772692 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9fc69d93_6595_4c89_aca5_052479e976f0.slice/crio-239066439ed7179f999cb900a3b166822a1abd4da5d7bf29d770780a4b70d215 WatchSource:0}: Error finding container 239066439ed7179f999cb900a3b166822a1abd4da5d7bf29d770780a4b70d215: Status 404 returned error can't find the container with id 239066439ed7179f999cb900a3b166822a1abd4da5d7bf29d770780a4b70d215 Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.252378 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"569159b6-791b-428c-84c7-5387c17a731b","Type":"ContainerStarted","Data":"aa240002bf77880f22a683aed22ba7efc2281598ec2249d3dabfd871f1ef10cc"} Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.252807 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"569159b6-791b-428c-84c7-5387c17a731b","Type":"ContainerStarted","Data":"f1c972323305a274ba3d58988bb40d28c38806e8ad7537963848bd1f2042899d"} Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.255441 4791 generic.go:334] "Generic (PLEG): container finished" podID="9fc69d93-6595-4c89-aca5-052479e976f0" containerID="e1dfe2a5310805d76494d6704dc8f2dcbf23c7e485dcdfafdded666cca69479f" exitCode=0 Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.255705 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-xkz8j" 
event={"ID":"9fc69d93-6595-4c89-aca5-052479e976f0","Type":"ContainerDied","Data":"e1dfe2a5310805d76494d6704dc8f2dcbf23c7e485dcdfafdded666cca69479f"} Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.255797 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-xkz8j" event={"ID":"9fc69d93-6595-4c89-aca5-052479e976f0","Type":"ContainerStarted","Data":"239066439ed7179f999cb900a3b166822a1abd4da5d7bf29d770780a4b70d215"} Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.299316 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=21.980349536 podStartE2EDuration="36.29930128s" podCreationTimestamp="2026-02-18 00:55:20 +0000 UTC" firstStartedPulling="2026-02-18 00:55:38.357679596 +0000 UTC m=+1279.925692766" lastFinishedPulling="2026-02-18 00:55:52.67663134 +0000 UTC m=+1294.244644510" observedRunningTime="2026-02-18 00:55:56.296517834 +0000 UTC m=+1297.864531004" watchObservedRunningTime="2026-02-18 00:55:56.29930128 +0000 UTC m=+1297.867314450" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.598193 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-5shlq-config-z58pz" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.622293 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-96ptc"] Feb 18 00:55:56 crc kubenswrapper[4791]: E0218 00:55:56.622713 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52094b3f-9dc5-422b-a44d-bd05f21be161" containerName="ovn-config" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.622735 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="52094b3f-9dc5-422b-a44d-bd05f21be161" containerName="ovn-config" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.623000 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="52094b3f-9dc5-422b-a44d-bd05f21be161" containerName="ovn-config" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.629818 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-77585f5f8c-96ptc" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.637189 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.638222 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-96ptc"] Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.699944 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.743069 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nfgld\" (UniqueName: \"kubernetes.io/projected/52094b3f-9dc5-422b-a44d-bd05f21be161-kube-api-access-nfgld\") pod \"52094b3f-9dc5-422b-a44d-bd05f21be161\" (UID: \"52094b3f-9dc5-422b-a44d-bd05f21be161\") " Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.743252 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/52094b3f-9dc5-422b-a44d-bd05f21be161-var-log-ovn\") pod \"52094b3f-9dc5-422b-a44d-bd05f21be161\" (UID: \"52094b3f-9dc5-422b-a44d-bd05f21be161\") " Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.743298 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/52094b3f-9dc5-422b-a44d-bd05f21be161-scripts\") pod \"52094b3f-9dc5-422b-a44d-bd05f21be161\" (UID: \"52094b3f-9dc5-422b-a44d-bd05f21be161\") " Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.743332 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/52094b3f-9dc5-422b-a44d-bd05f21be161-var-run\") pod \"52094b3f-9dc5-422b-a44d-bd05f21be161\" (UID: \"52094b3f-9dc5-422b-a44d-bd05f21be161\") " Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.743369 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/52094b3f-9dc5-422b-a44d-bd05f21be161-var-run-ovn\") pod \"52094b3f-9dc5-422b-a44d-bd05f21be161\" (UID: \"52094b3f-9dc5-422b-a44d-bd05f21be161\") " Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.743382 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/52094b3f-9dc5-422b-a44d-bd05f21be161-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "52094b3f-9dc5-422b-a44d-bd05f21be161" (UID: "52094b3f-9dc5-422b-a44d-bd05f21be161"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.743433 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/52094b3f-9dc5-422b-a44d-bd05f21be161-additional-scripts\") pod \"52094b3f-9dc5-422b-a44d-bd05f21be161\" (UID: \"52094b3f-9dc5-422b-a44d-bd05f21be161\") " Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.743424 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/52094b3f-9dc5-422b-a44d-bd05f21be161-var-run" (OuterVolumeSpecName: "var-run") pod "52094b3f-9dc5-422b-a44d-bd05f21be161" (UID: "52094b3f-9dc5-422b-a44d-bd05f21be161"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.743466 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/52094b3f-9dc5-422b-a44d-bd05f21be161-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "52094b3f-9dc5-422b-a44d-bd05f21be161" (UID: "52094b3f-9dc5-422b-a44d-bd05f21be161"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.743801 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea161630-d65f-4b00-bcf3-bc3822b3011e-config\") pod \"dnsmasq-dns-77585f5f8c-96ptc\" (UID: \"ea161630-d65f-4b00-bcf3-bc3822b3011e\") " pod="openstack/dnsmasq-dns-77585f5f8c-96ptc" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.743874 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ea161630-d65f-4b00-bcf3-bc3822b3011e-ovsdbserver-nb\") pod \"dnsmasq-dns-77585f5f8c-96ptc\" (UID: \"ea161630-d65f-4b00-bcf3-bc3822b3011e\") " pod="openstack/dnsmasq-dns-77585f5f8c-96ptc" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.743918 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ea161630-d65f-4b00-bcf3-bc3822b3011e-dns-svc\") pod \"dnsmasq-dns-77585f5f8c-96ptc\" (UID: \"ea161630-d65f-4b00-bcf3-bc3822b3011e\") " pod="openstack/dnsmasq-dns-77585f5f8c-96ptc" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.743936 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ea161630-d65f-4b00-bcf3-bc3822b3011e-ovsdbserver-sb\") pod \"dnsmasq-dns-77585f5f8c-96ptc\" (UID: \"ea161630-d65f-4b00-bcf3-bc3822b3011e\") " pod="openstack/dnsmasq-dns-77585f5f8c-96ptc" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.743969 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ea161630-d65f-4b00-bcf3-bc3822b3011e-dns-swift-storage-0\") pod \"dnsmasq-dns-77585f5f8c-96ptc\" (UID: \"ea161630-d65f-4b00-bcf3-bc3822b3011e\") " pod="openstack/dnsmasq-dns-77585f5f8c-96ptc" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.743990 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bh6jx\" (UniqueName: \"kubernetes.io/projected/ea161630-d65f-4b00-bcf3-bc3822b3011e-kube-api-access-bh6jx\") pod \"dnsmasq-dns-77585f5f8c-96ptc\" (UID: \"ea161630-d65f-4b00-bcf3-bc3822b3011e\") " pod="openstack/dnsmasq-dns-77585f5f8c-96ptc" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.744044 4791 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/52094b3f-9dc5-422b-a44d-bd05f21be161-var-log-ovn\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.744055 4791 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/52094b3f-9dc5-422b-a44d-bd05f21be161-var-run\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.744073 4791 reconciler_common.go:293] "Volume 
detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/52094b3f-9dc5-422b-a44d-bd05f21be161-var-run-ovn\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.744110 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/52094b3f-9dc5-422b-a44d-bd05f21be161-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "52094b3f-9dc5-422b-a44d-bd05f21be161" (UID: "52094b3f-9dc5-422b-a44d-bd05f21be161"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.744496 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/52094b3f-9dc5-422b-a44d-bd05f21be161-scripts" (OuterVolumeSpecName: "scripts") pod "52094b3f-9dc5-422b-a44d-bd05f21be161" (UID: "52094b3f-9dc5-422b-a44d-bd05f21be161"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.753805 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52094b3f-9dc5-422b-a44d-bd05f21be161-kube-api-access-nfgld" (OuterVolumeSpecName: "kube-api-access-nfgld") pod "52094b3f-9dc5-422b-a44d-bd05f21be161" (UID: "52094b3f-9dc5-422b-a44d-bd05f21be161"). InnerVolumeSpecName "kube-api-access-nfgld". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.846885 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea161630-d65f-4b00-bcf3-bc3822b3011e-config\") pod \"dnsmasq-dns-77585f5f8c-96ptc\" (UID: \"ea161630-d65f-4b00-bcf3-bc3822b3011e\") " pod="openstack/dnsmasq-dns-77585f5f8c-96ptc" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.847555 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ea161630-d65f-4b00-bcf3-bc3822b3011e-ovsdbserver-nb\") pod \"dnsmasq-dns-77585f5f8c-96ptc\" (UID: \"ea161630-d65f-4b00-bcf3-bc3822b3011e\") " pod="openstack/dnsmasq-dns-77585f5f8c-96ptc" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.847716 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ea161630-d65f-4b00-bcf3-bc3822b3011e-dns-svc\") pod \"dnsmasq-dns-77585f5f8c-96ptc\" (UID: \"ea161630-d65f-4b00-bcf3-bc3822b3011e\") " pod="openstack/dnsmasq-dns-77585f5f8c-96ptc" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.847791 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ea161630-d65f-4b00-bcf3-bc3822b3011e-ovsdbserver-sb\") pod \"dnsmasq-dns-77585f5f8c-96ptc\" (UID: \"ea161630-d65f-4b00-bcf3-bc3822b3011e\") " pod="openstack/dnsmasq-dns-77585f5f8c-96ptc" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.847930 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ea161630-d65f-4b00-bcf3-bc3822b3011e-dns-swift-storage-0\") pod \"dnsmasq-dns-77585f5f8c-96ptc\" (UID: \"ea161630-d65f-4b00-bcf3-bc3822b3011e\") " pod="openstack/dnsmasq-dns-77585f5f8c-96ptc" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.848011 4791 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"kube-api-access-bh6jx\" (UniqueName: \"kubernetes.io/projected/ea161630-d65f-4b00-bcf3-bc3822b3011e-kube-api-access-bh6jx\") pod \"dnsmasq-dns-77585f5f8c-96ptc\" (UID: \"ea161630-d65f-4b00-bcf3-bc3822b3011e\") " pod="openstack/dnsmasq-dns-77585f5f8c-96ptc" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.848469 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ea161630-d65f-4b00-bcf3-bc3822b3011e-ovsdbserver-nb\") pod \"dnsmasq-dns-77585f5f8c-96ptc\" (UID: \"ea161630-d65f-4b00-bcf3-bc3822b3011e\") " pod="openstack/dnsmasq-dns-77585f5f8c-96ptc" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.848595 4791 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/52094b3f-9dc5-422b-a44d-bd05f21be161-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.848733 4791 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/52094b3f-9dc5-422b-a44d-bd05f21be161-additional-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.848763 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nfgld\" (UniqueName: \"kubernetes.io/projected/52094b3f-9dc5-422b-a44d-bd05f21be161-kube-api-access-nfgld\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.848795 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ea161630-d65f-4b00-bcf3-bc3822b3011e-dns-svc\") pod \"dnsmasq-dns-77585f5f8c-96ptc\" (UID: \"ea161630-d65f-4b00-bcf3-bc3822b3011e\") " pod="openstack/dnsmasq-dns-77585f5f8c-96ptc" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.848878 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ea161630-d65f-4b00-bcf3-bc3822b3011e-dns-swift-storage-0\") pod \"dnsmasq-dns-77585f5f8c-96ptc\" (UID: \"ea161630-d65f-4b00-bcf3-bc3822b3011e\") " pod="openstack/dnsmasq-dns-77585f5f8c-96ptc" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.849222 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ea161630-d65f-4b00-bcf3-bc3822b3011e-ovsdbserver-sb\") pod \"dnsmasq-dns-77585f5f8c-96ptc\" (UID: \"ea161630-d65f-4b00-bcf3-bc3822b3011e\") " pod="openstack/dnsmasq-dns-77585f5f8c-96ptc" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.849430 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea161630-d65f-4b00-bcf3-bc3822b3011e-config\") pod \"dnsmasq-dns-77585f5f8c-96ptc\" (UID: \"ea161630-d65f-4b00-bcf3-bc3822b3011e\") " pod="openstack/dnsmasq-dns-77585f5f8c-96ptc" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.867223 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bh6jx\" (UniqueName: \"kubernetes.io/projected/ea161630-d65f-4b00-bcf3-bc3822b3011e-kube-api-access-bh6jx\") pod \"dnsmasq-dns-77585f5f8c-96ptc\" (UID: \"ea161630-d65f-4b00-bcf3-bc3822b3011e\") " pod="openstack/dnsmasq-dns-77585f5f8c-96ptc" Feb 18 00:55:56 crc kubenswrapper[4791]: I0218 00:55:56.946582 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-77585f5f8c-96ptc" Feb 18 00:55:57 crc kubenswrapper[4791]: I0218 00:55:57.288303 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-5shlq-config-z58pz" Feb 18 00:55:57 crc kubenswrapper[4791]: I0218 00:55:57.289141 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-5shlq-config-z58pz" event={"ID":"52094b3f-9dc5-422b-a44d-bd05f21be161","Type":"ContainerDied","Data":"e84df90b1135f676606903587a35cf5acdbb734b7e001b9903df4b23e968f339"} Feb 18 00:55:57 crc kubenswrapper[4791]: I0218 00:55:57.289196 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e84df90b1135f676606903587a35cf5acdbb734b7e001b9903df4b23e968f339" Feb 18 00:55:57 crc kubenswrapper[4791]: I0218 00:55:57.698348 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-96ptc"] Feb 18 00:55:57 crc kubenswrapper[4791]: I0218 00:55:57.723042 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-5shlq-config-z58pz"] Feb 18 00:55:57 crc kubenswrapper[4791]: I0218 00:55:57.737313 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-5shlq-config-z58pz"] Feb 18 00:55:57 crc kubenswrapper[4791]: I0218 00:55:57.877220 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-5shlq-config-klvt9"] Feb 18 00:55:57 crc kubenswrapper[4791]: I0218 00:55:57.880344 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-5shlq-config-klvt9" Feb 18 00:55:57 crc kubenswrapper[4791]: I0218 00:55:57.882779 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Feb 18 00:55:57 crc kubenswrapper[4791]: I0218 00:55:57.894503 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-5shlq-config-klvt9"] Feb 18 00:55:57 crc kubenswrapper[4791]: I0218 00:55:57.933726 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-xkz8j" Feb 18 00:55:57 crc kubenswrapper[4791]: I0218 00:55:57.979981 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac-scripts\") pod \"ovn-controller-5shlq-config-klvt9\" (UID: \"1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac\") " pod="openstack/ovn-controller-5shlq-config-klvt9" Feb 18 00:55:57 crc kubenswrapper[4791]: I0218 00:55:57.980016 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac-additional-scripts\") pod \"ovn-controller-5shlq-config-klvt9\" (UID: \"1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac\") " pod="openstack/ovn-controller-5shlq-config-klvt9" Feb 18 00:55:57 crc kubenswrapper[4791]: I0218 00:55:57.980047 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6dkzd\" (UniqueName: \"kubernetes.io/projected/1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac-kube-api-access-6dkzd\") pod \"ovn-controller-5shlq-config-klvt9\" (UID: \"1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac\") " pod="openstack/ovn-controller-5shlq-config-klvt9" Feb 18 00:55:57 crc kubenswrapper[4791]: I0218 00:55:57.980084 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac-var-run-ovn\") pod \"ovn-controller-5shlq-config-klvt9\" (UID: \"1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac\") " pod="openstack/ovn-controller-5shlq-config-klvt9" Feb 18 00:55:57 crc kubenswrapper[4791]: I0218 00:55:57.980135 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac-var-run\") pod \"ovn-controller-5shlq-config-klvt9\" (UID: \"1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac\") " pod="openstack/ovn-controller-5shlq-config-klvt9" Feb 18 00:55:57 crc kubenswrapper[4791]: I0218 00:55:57.980417 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac-var-log-ovn\") pod \"ovn-controller-5shlq-config-klvt9\" (UID: \"1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac\") " pod="openstack/ovn-controller-5shlq-config-klvt9" Feb 18 00:55:58 crc kubenswrapper[4791]: I0218 00:55:58.068617 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-5shlq" Feb 18 00:55:58 crc kubenswrapper[4791]: I0218 00:55:58.082051 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-29b28\" (UniqueName: \"kubernetes.io/projected/9fc69d93-6595-4c89-aca5-052479e976f0-kube-api-access-29b28\") pod \"9fc69d93-6595-4c89-aca5-052479e976f0\" (UID: \"9fc69d93-6595-4c89-aca5-052479e976f0\") " Feb 18 00:55:58 crc kubenswrapper[4791]: I0218 00:55:58.083219 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9fc69d93-6595-4c89-aca5-052479e976f0-operator-scripts\") pod \"9fc69d93-6595-4c89-aca5-052479e976f0\" (UID: \"9fc69d93-6595-4c89-aca5-052479e976f0\") " Feb 18 00:55:58 crc kubenswrapper[4791]: I0218 00:55:58.083690 4791 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac-scripts\") pod \"ovn-controller-5shlq-config-klvt9\" (UID: \"1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac\") " pod="openstack/ovn-controller-5shlq-config-klvt9" Feb 18 00:55:58 crc kubenswrapper[4791]: I0218 00:55:58.083729 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac-additional-scripts\") pod \"ovn-controller-5shlq-config-klvt9\" (UID: \"1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac\") " pod="openstack/ovn-controller-5shlq-config-klvt9" Feb 18 00:55:58 crc kubenswrapper[4791]: I0218 00:55:58.083981 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6dkzd\" (UniqueName: \"kubernetes.io/projected/1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac-kube-api-access-6dkzd\") pod \"ovn-controller-5shlq-config-klvt9\" (UID: \"1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac\") " pod="openstack/ovn-controller-5shlq-config-klvt9" Feb 18 00:55:58 crc kubenswrapper[4791]: I0218 00:55:58.084008 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac-var-run-ovn\") pod \"ovn-controller-5shlq-config-klvt9\" (UID: \"1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac\") " pod="openstack/ovn-controller-5shlq-config-klvt9" Feb 18 00:55:58 crc kubenswrapper[4791]: I0218 00:55:58.084027 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac-var-run\") pod \"ovn-controller-5shlq-config-klvt9\" (UID: \"1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac\") " pod="openstack/ovn-controller-5shlq-config-klvt9" Feb 18 00:55:58 crc kubenswrapper[4791]: I0218 00:55:58.084135 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac-var-log-ovn\") pod \"ovn-controller-5shlq-config-klvt9\" (UID: \"1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac\") " pod="openstack/ovn-controller-5shlq-config-klvt9" Feb 18 00:55:58 crc kubenswrapper[4791]: I0218 00:55:58.084569 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac-var-log-ovn\") pod \"ovn-controller-5shlq-config-klvt9\" (UID: \"1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac\") " pod="openstack/ovn-controller-5shlq-config-klvt9" Feb 18 00:55:58 crc kubenswrapper[4791]: I0218 00:55:58.085146 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9fc69d93-6595-4c89-aca5-052479e976f0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9fc69d93-6595-4c89-aca5-052479e976f0" (UID: "9fc69d93-6595-4c89-aca5-052479e976f0"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:55:58 crc kubenswrapper[4791]: I0218 00:55:58.085229 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac-var-run-ovn\") pod \"ovn-controller-5shlq-config-klvt9\" (UID: \"1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac\") " pod="openstack/ovn-controller-5shlq-config-klvt9" Feb 18 00:55:58 crc kubenswrapper[4791]: I0218 00:55:58.085292 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac-var-run\") pod \"ovn-controller-5shlq-config-klvt9\" (UID: \"1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac\") " pod="openstack/ovn-controller-5shlq-config-klvt9" Feb 18 00:55:58 crc kubenswrapper[4791]: I0218 00:55:58.086792 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac-additional-scripts\") pod \"ovn-controller-5shlq-config-klvt9\" (UID: \"1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac\") " pod="openstack/ovn-controller-5shlq-config-klvt9" Feb 18 00:55:58 crc kubenswrapper[4791]: I0218 00:55:58.088489 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac-scripts\") pod \"ovn-controller-5shlq-config-klvt9\" (UID: \"1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac\") " pod="openstack/ovn-controller-5shlq-config-klvt9" Feb 18 00:55:58 crc kubenswrapper[4791]: I0218 00:55:58.091002 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9fc69d93-6595-4c89-aca5-052479e976f0-kube-api-access-29b28" (OuterVolumeSpecName: "kube-api-access-29b28") pod "9fc69d93-6595-4c89-aca5-052479e976f0" (UID: "9fc69d93-6595-4c89-aca5-052479e976f0"). InnerVolumeSpecName "kube-api-access-29b28". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:55:58 crc kubenswrapper[4791]: I0218 00:55:58.109624 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6dkzd\" (UniqueName: \"kubernetes.io/projected/1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac-kube-api-access-6dkzd\") pod \"ovn-controller-5shlq-config-klvt9\" (UID: \"1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac\") " pod="openstack/ovn-controller-5shlq-config-klvt9" Feb 18 00:55:58 crc kubenswrapper[4791]: I0218 00:55:58.190641 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-29b28\" (UniqueName: \"kubernetes.io/projected/9fc69d93-6595-4c89-aca5-052479e976f0-kube-api-access-29b28\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:58 crc kubenswrapper[4791]: I0218 00:55:58.190681 4791 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9fc69d93-6595-4c89-aca5-052479e976f0-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:55:58 crc kubenswrapper[4791]: I0218 00:55:58.274137 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-5shlq-config-klvt9" Feb 18 00:55:58 crc kubenswrapper[4791]: I0218 00:55:58.306047 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-xkz8j" event={"ID":"9fc69d93-6595-4c89-aca5-052479e976f0","Type":"ContainerDied","Data":"239066439ed7179f999cb900a3b166822a1abd4da5d7bf29d770780a4b70d215"} Feb 18 00:55:58 crc kubenswrapper[4791]: I0218 00:55:58.306105 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="239066439ed7179f999cb900a3b166822a1abd4da5d7bf29d770780a4b70d215" Feb 18 00:55:58 crc kubenswrapper[4791]: I0218 00:55:58.306074 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-xkz8j" Feb 18 00:55:58 crc kubenswrapper[4791]: I0218 00:55:58.312168 4791 generic.go:334] "Generic (PLEG): container finished" podID="ea161630-d65f-4b00-bcf3-bc3822b3011e" containerID="8b9b2bcf2e3a619792c6167a262a64ea9092ccd1474b11c988188fdb50bf7de2" exitCode=0 Feb 18 00:55:58 crc kubenswrapper[4791]: I0218 00:55:58.312206 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-96ptc" event={"ID":"ea161630-d65f-4b00-bcf3-bc3822b3011e","Type":"ContainerDied","Data":"8b9b2bcf2e3a619792c6167a262a64ea9092ccd1474b11c988188fdb50bf7de2"} Feb 18 00:55:58 crc kubenswrapper[4791]: I0218 00:55:58.312232 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-96ptc" event={"ID":"ea161630-d65f-4b00-bcf3-bc3822b3011e","Type":"ContainerStarted","Data":"83b5a5762e3c069337f5cbbb5292d858e10f0440f48b60a33da6159c4e30c8b5"} Feb 18 00:55:58 crc kubenswrapper[4791]: I0218 00:55:58.771201 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-5shlq-config-klvt9"] Feb 18 00:55:59 crc kubenswrapper[4791]: I0218 00:55:59.076109 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="52094b3f-9dc5-422b-a44d-bd05f21be161" path="/var/lib/kubelet/pods/52094b3f-9dc5-422b-a44d-bd05f21be161/volumes" Feb 18 00:55:59 crc kubenswrapper[4791]: I0218 00:55:59.324081 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-96ptc" event={"ID":"ea161630-d65f-4b00-bcf3-bc3822b3011e","Type":"ContainerStarted","Data":"0bb2b7ee20a8bb64f7435a146b058241798837a866812c67cea49c071f19184c"} Feb 18 00:55:59 crc kubenswrapper[4791]: I0218 00:55:59.324288 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-77585f5f8c-96ptc" Feb 18 00:55:59 crc kubenswrapper[4791]: I0218 00:55:59.348139 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-77585f5f8c-96ptc" podStartSLOduration=3.348119588 podStartE2EDuration="3.348119588s" podCreationTimestamp="2026-02-18 00:55:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:55:59.345967782 +0000 UTC m=+1300.913980952" watchObservedRunningTime="2026-02-18 00:55:59.348119588 +0000 UTC m=+1300.916132768" Feb 18 00:56:01 crc kubenswrapper[4791]: I0218 00:56:01.225192 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-xkz8j"] Feb 18 00:56:01 crc kubenswrapper[4791]: I0218 00:56:01.234263 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-xkz8j"] Feb 18 00:56:03 crc kubenswrapper[4791]: I0218 
00:56:03.090509 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9fc69d93-6595-4c89-aca5-052479e976f0" path="/var/lib/kubelet/pods/9fc69d93-6595-4c89-aca5-052479e976f0/volumes" Feb 18 00:56:04 crc kubenswrapper[4791]: I0218 00:56:04.775503 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.088193 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-2" Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.464443 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-fdm5n"] Feb 18 00:56:05 crc kubenswrapper[4791]: E0218 00:56:05.465217 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9fc69d93-6595-4c89-aca5-052479e976f0" containerName="mariadb-account-create-update" Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.465314 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="9fc69d93-6595-4c89-aca5-052479e976f0" containerName="mariadb-account-create-update" Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.465619 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="9fc69d93-6595-4c89-aca5-052479e976f0" containerName="mariadb-account-create-update" Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.466474 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-fdm5n" Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.501036 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-fdm5n"] Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.574584 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f27aa21f-8528-401f-b41a-090ee07af740-operator-scripts\") pod \"cinder-db-create-fdm5n\" (UID: \"f27aa21f-8528-401f-b41a-090ee07af740\") " pod="openstack/cinder-db-create-fdm5n" Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.575062 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pw8r4\" (UniqueName: \"kubernetes.io/projected/f27aa21f-8528-401f-b41a-090ee07af740-kube-api-access-pw8r4\") pod \"cinder-db-create-fdm5n\" (UID: \"f27aa21f-8528-401f-b41a-090ee07af740\") " pod="openstack/cinder-db-create-fdm5n" Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.663021 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-d604-account-create-update-jl6cf"] Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.665055 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-d604-account-create-update-jl6cf" Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.673038 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.679285 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pw8r4\" (UniqueName: \"kubernetes.io/projected/f27aa21f-8528-401f-b41a-090ee07af740-kube-api-access-pw8r4\") pod \"cinder-db-create-fdm5n\" (UID: \"f27aa21f-8528-401f-b41a-090ee07af740\") " pod="openstack/cinder-db-create-fdm5n" Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.679448 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f27aa21f-8528-401f-b41a-090ee07af740-operator-scripts\") pod \"cinder-db-create-fdm5n\" (UID: \"f27aa21f-8528-401f-b41a-090ee07af740\") " pod="openstack/cinder-db-create-fdm5n" Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.680151 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f27aa21f-8528-401f-b41a-090ee07af740-operator-scripts\") pod \"cinder-db-create-fdm5n\" (UID: \"f27aa21f-8528-401f-b41a-090ee07af740\") " pod="openstack/cinder-db-create-fdm5n" Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.693495 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-create-kdm9j"] Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.695132 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-kdm9j" Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.735026 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pw8r4\" (UniqueName: \"kubernetes.io/projected/f27aa21f-8528-401f-b41a-090ee07af740-kube-api-access-pw8r4\") pod \"cinder-db-create-fdm5n\" (UID: \"f27aa21f-8528-401f-b41a-090ee07af740\") " pod="openstack/cinder-db-create-fdm5n" Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.754360 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-d604-account-create-update-jl6cf"] Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.782071 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a54c4f28-a37a-446e-a455-f471bfb7b4fa-operator-scripts\") pod \"cinder-d604-account-create-update-jl6cf\" (UID: \"a54c4f28-a37a-446e-a455-f471bfb7b4fa\") " pod="openstack/cinder-d604-account-create-update-jl6cf" Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.782475 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n68vj\" (UniqueName: \"kubernetes.io/projected/a54c4f28-a37a-446e-a455-f471bfb7b4fa-kube-api-access-n68vj\") pod \"cinder-d604-account-create-update-jl6cf\" (UID: \"a54c4f28-a37a-446e-a455-f471bfb7b4fa\") " pod="openstack/cinder-d604-account-create-update-jl6cf" Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.782600 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ab28bbf6-b31e-4634-9a35-1e2333b10adc-operator-scripts\") pod \"heat-db-create-kdm9j\" (UID: \"ab28bbf6-b31e-4634-9a35-1e2333b10adc\") " pod="openstack/heat-db-create-kdm9j" Feb 18 
00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.782710 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zfgpp\" (UniqueName: \"kubernetes.io/projected/ab28bbf6-b31e-4634-9a35-1e2333b10adc-kube-api-access-zfgpp\") pod \"heat-db-create-kdm9j\" (UID: \"ab28bbf6-b31e-4634-9a35-1e2333b10adc\") " pod="openstack/heat-db-create-kdm9j" Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.799472 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-fdm5n" Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.807710 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-kdm9j"] Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.851572 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-mhld5"] Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.853690 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-mhld5" Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.900651 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-mhld5"] Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.901267 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zfgpp\" (UniqueName: \"kubernetes.io/projected/ab28bbf6-b31e-4634-9a35-1e2333b10adc-kube-api-access-zfgpp\") pod \"heat-db-create-kdm9j\" (UID: \"ab28bbf6-b31e-4634-9a35-1e2333b10adc\") " pod="openstack/heat-db-create-kdm9j" Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.901516 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a54c4f28-a37a-446e-a455-f471bfb7b4fa-operator-scripts\") pod \"cinder-d604-account-create-update-jl6cf\" (UID: \"a54c4f28-a37a-446e-a455-f471bfb7b4fa\") " pod="openstack/cinder-d604-account-create-update-jl6cf" Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.901868 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n68vj\" (UniqueName: \"kubernetes.io/projected/a54c4f28-a37a-446e-a455-f471bfb7b4fa-kube-api-access-n68vj\") pod \"cinder-d604-account-create-update-jl6cf\" (UID: \"a54c4f28-a37a-446e-a455-f471bfb7b4fa\") " pod="openstack/cinder-d604-account-create-update-jl6cf" Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.901982 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ab28bbf6-b31e-4634-9a35-1e2333b10adc-operator-scripts\") pod \"heat-db-create-kdm9j\" (UID: \"ab28bbf6-b31e-4634-9a35-1e2333b10adc\") " pod="openstack/heat-db-create-kdm9j" Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.902911 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a54c4f28-a37a-446e-a455-f471bfb7b4fa-operator-scripts\") pod \"cinder-d604-account-create-update-jl6cf\" (UID: \"a54c4f28-a37a-446e-a455-f471bfb7b4fa\") " pod="openstack/cinder-d604-account-create-update-jl6cf" Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.903150 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ab28bbf6-b31e-4634-9a35-1e2333b10adc-operator-scripts\") pod \"heat-db-create-kdm9j\" (UID: 
\"ab28bbf6-b31e-4634-9a35-1e2333b10adc\") " pod="openstack/heat-db-create-kdm9j" Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.963889 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n68vj\" (UniqueName: \"kubernetes.io/projected/a54c4f28-a37a-446e-a455-f471bfb7b4fa-kube-api-access-n68vj\") pod \"cinder-d604-account-create-update-jl6cf\" (UID: \"a54c4f28-a37a-446e-a455-f471bfb7b4fa\") " pod="openstack/cinder-d604-account-create-update-jl6cf" Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.963948 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zfgpp\" (UniqueName: \"kubernetes.io/projected/ab28bbf6-b31e-4634-9a35-1e2333b10adc-kube-api-access-zfgpp\") pod \"heat-db-create-kdm9j\" (UID: \"ab28bbf6-b31e-4634-9a35-1e2333b10adc\") " pod="openstack/heat-db-create-kdm9j" Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.967366 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-cskgf"] Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.968945 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-cskgf" Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.983411 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-ced8-account-create-update-7vkh4"] Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.991217 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-ced8-account-create-update-7vkh4"] Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.991372 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-ced8-account-create-update-7vkh4" Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.993937 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-d604-account-create-update-jl6cf" Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.998486 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-cskgf"] Feb 18 00:56:05 crc kubenswrapper[4791]: I0218 00:56:05.999191 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.004014 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9p7lk\" (UniqueName: \"kubernetes.io/projected/5227c08e-63f7-4ec3-b01f-ad54d550ce8e-kube-api-access-9p7lk\") pod \"neutron-db-create-mhld5\" (UID: \"5227c08e-63f7-4ec3-b01f-ad54d550ce8e\") " pod="openstack/neutron-db-create-mhld5" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.004128 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5227c08e-63f7-4ec3-b01f-ad54d550ce8e-operator-scripts\") pod \"neutron-db-create-mhld5\" (UID: \"5227c08e-63f7-4ec3-b01f-ad54d550ce8e\") " pod="openstack/neutron-db-create-mhld5" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.015850 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-kdm9j" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.100217 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-2992-account-create-update-lf4d8"] Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.101652 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-2992-account-create-update-lf4d8" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.105468 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-db-secret" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.106916 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f4551c6b-bd8f-49bd-8755-4c1262f74e04-operator-scripts\") pod \"neutron-ced8-account-create-update-7vkh4\" (UID: \"f4551c6b-bd8f-49bd-8755-4c1262f74e04\") " pod="openstack/neutron-ced8-account-create-update-7vkh4" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.106994 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4tz7p\" (UniqueName: \"kubernetes.io/projected/f4551c6b-bd8f-49bd-8755-4c1262f74e04-kube-api-access-4tz7p\") pod \"neutron-ced8-account-create-update-7vkh4\" (UID: \"f4551c6b-bd8f-49bd-8755-4c1262f74e04\") " pod="openstack/neutron-ced8-account-create-update-7vkh4" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.107017 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3e122e7c-a512-4608-ae3a-74e528fcfed4-operator-scripts\") pod \"barbican-db-create-cskgf\" (UID: \"3e122e7c-a512-4608-ae3a-74e528fcfed4\") " pod="openstack/barbican-db-create-cskgf" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.107081 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9p7lk\" (UniqueName: \"kubernetes.io/projected/5227c08e-63f7-4ec3-b01f-ad54d550ce8e-kube-api-access-9p7lk\") pod \"neutron-db-create-mhld5\" (UID: \"5227c08e-63f7-4ec3-b01f-ad54d550ce8e\") " pod="openstack/neutron-db-create-mhld5" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.107101 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pfv6t\" (UniqueName: \"kubernetes.io/projected/3e122e7c-a512-4608-ae3a-74e528fcfed4-kube-api-access-pfv6t\") pod \"barbican-db-create-cskgf\" (UID: \"3e122e7c-a512-4608-ae3a-74e528fcfed4\") " pod="openstack/barbican-db-create-cskgf" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.107265 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5227c08e-63f7-4ec3-b01f-ad54d550ce8e-operator-scripts\") pod \"neutron-db-create-mhld5\" (UID: \"5227c08e-63f7-4ec3-b01f-ad54d550ce8e\") " pod="openstack/neutron-db-create-mhld5" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.107982 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5227c08e-63f7-4ec3-b01f-ad54d550ce8e-operator-scripts\") pod \"neutron-db-create-mhld5\" (UID: \"5227c08e-63f7-4ec3-b01f-ad54d550ce8e\") " pod="openstack/neutron-db-create-mhld5" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.114432 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-2992-account-create-update-lf4d8"] Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.133505 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-9rmnf"] Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.137301 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-9rmnf" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.140901 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9p7lk\" (UniqueName: \"kubernetes.io/projected/5227c08e-63f7-4ec3-b01f-ad54d550ce8e-kube-api-access-9p7lk\") pod \"neutron-db-create-mhld5\" (UID: \"5227c08e-63f7-4ec3-b01f-ad54d550ce8e\") " pod="openstack/neutron-db-create-mhld5" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.146078 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-9rmnf"] Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.156108 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.156472 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-49g64" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.159492 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.159683 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.211454 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-mhld5" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.212256 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pfv6t\" (UniqueName: \"kubernetes.io/projected/3e122e7c-a512-4608-ae3a-74e528fcfed4-kube-api-access-pfv6t\") pod \"barbican-db-create-cskgf\" (UID: \"3e122e7c-a512-4608-ae3a-74e528fcfed4\") " pod="openstack/barbican-db-create-cskgf" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.212378 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-85jg6\" (UniqueName: \"kubernetes.io/projected/57936f2c-e43e-48c1-b59e-cfe1ae0f99b7-kube-api-access-85jg6\") pod \"keystone-db-sync-9rmnf\" (UID: \"57936f2c-e43e-48c1-b59e-cfe1ae0f99b7\") " pod="openstack/keystone-db-sync-9rmnf" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.212405 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/adad799a-952a-4ae3-8c37-02707bf01576-operator-scripts\") pod \"heat-2992-account-create-update-lf4d8\" (UID: \"adad799a-952a-4ae3-8c37-02707bf01576\") " pod="openstack/heat-2992-account-create-update-lf4d8" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.212482 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f4551c6b-bd8f-49bd-8755-4c1262f74e04-operator-scripts\") pod \"neutron-ced8-account-create-update-7vkh4\" (UID: \"f4551c6b-bd8f-49bd-8755-4c1262f74e04\") " pod="openstack/neutron-ced8-account-create-update-7vkh4" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.212523 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57936f2c-e43e-48c1-b59e-cfe1ae0f99b7-config-data\") pod \"keystone-db-sync-9rmnf\" (UID: \"57936f2c-e43e-48c1-b59e-cfe1ae0f99b7\") " pod="openstack/keystone-db-sync-9rmnf" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.212551 4791 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4tz7p\" (UniqueName: \"kubernetes.io/projected/f4551c6b-bd8f-49bd-8755-4c1262f74e04-kube-api-access-4tz7p\") pod \"neutron-ced8-account-create-update-7vkh4\" (UID: \"f4551c6b-bd8f-49bd-8755-4c1262f74e04\") " pod="openstack/neutron-ced8-account-create-update-7vkh4" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.212570 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3e122e7c-a512-4608-ae3a-74e528fcfed4-operator-scripts\") pod \"barbican-db-create-cskgf\" (UID: \"3e122e7c-a512-4608-ae3a-74e528fcfed4\") " pod="openstack/barbican-db-create-cskgf" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.212589 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bhp64\" (UniqueName: \"kubernetes.io/projected/adad799a-952a-4ae3-8c37-02707bf01576-kube-api-access-bhp64\") pod \"heat-2992-account-create-update-lf4d8\" (UID: \"adad799a-952a-4ae3-8c37-02707bf01576\") " pod="openstack/heat-2992-account-create-update-lf4d8" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.212613 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57936f2c-e43e-48c1-b59e-cfe1ae0f99b7-combined-ca-bundle\") pod \"keystone-db-sync-9rmnf\" (UID: \"57936f2c-e43e-48c1-b59e-cfe1ae0f99b7\") " pod="openstack/keystone-db-sync-9rmnf" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.213633 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f4551c6b-bd8f-49bd-8755-4c1262f74e04-operator-scripts\") pod \"neutron-ced8-account-create-update-7vkh4\" (UID: \"f4551c6b-bd8f-49bd-8755-4c1262f74e04\") " pod="openstack/neutron-ced8-account-create-update-7vkh4" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.214814 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3e122e7c-a512-4608-ae3a-74e528fcfed4-operator-scripts\") pod \"barbican-db-create-cskgf\" (UID: \"3e122e7c-a512-4608-ae3a-74e528fcfed4\") " pod="openstack/barbican-db-create-cskgf" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.243047 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pfv6t\" (UniqueName: \"kubernetes.io/projected/3e122e7c-a512-4608-ae3a-74e528fcfed4-kube-api-access-pfv6t\") pod \"barbican-db-create-cskgf\" (UID: \"3e122e7c-a512-4608-ae3a-74e528fcfed4\") " pod="openstack/barbican-db-create-cskgf" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.243080 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4tz7p\" (UniqueName: \"kubernetes.io/projected/f4551c6b-bd8f-49bd-8755-4c1262f74e04-kube-api-access-4tz7p\") pod \"neutron-ced8-account-create-update-7vkh4\" (UID: \"f4551c6b-bd8f-49bd-8755-4c1262f74e04\") " pod="openstack/neutron-ced8-account-create-update-7vkh4" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.245507 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-7843-account-create-update-95sgt"] Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.247097 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-7843-account-create-update-95sgt" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.251827 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.266820 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-7843-account-create-update-95sgt"] Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.314143 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-85jg6\" (UniqueName: \"kubernetes.io/projected/57936f2c-e43e-48c1-b59e-cfe1ae0f99b7-kube-api-access-85jg6\") pod \"keystone-db-sync-9rmnf\" (UID: \"57936f2c-e43e-48c1-b59e-cfe1ae0f99b7\") " pod="openstack/keystone-db-sync-9rmnf" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.314208 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/adad799a-952a-4ae3-8c37-02707bf01576-operator-scripts\") pod \"heat-2992-account-create-update-lf4d8\" (UID: \"adad799a-952a-4ae3-8c37-02707bf01576\") " pod="openstack/heat-2992-account-create-update-lf4d8" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.314253 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4shxf\" (UniqueName: \"kubernetes.io/projected/f91b47af-6331-4d04-b4b5-9197d09fd773-kube-api-access-4shxf\") pod \"barbican-7843-account-create-update-95sgt\" (UID: \"f91b47af-6331-4d04-b4b5-9197d09fd773\") " pod="openstack/barbican-7843-account-create-update-95sgt" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.314307 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57936f2c-e43e-48c1-b59e-cfe1ae0f99b7-config-data\") pod \"keystone-db-sync-9rmnf\" (UID: \"57936f2c-e43e-48c1-b59e-cfe1ae0f99b7\") " pod="openstack/keystone-db-sync-9rmnf" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.314337 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bhp64\" (UniqueName: \"kubernetes.io/projected/adad799a-952a-4ae3-8c37-02707bf01576-kube-api-access-bhp64\") pod \"heat-2992-account-create-update-lf4d8\" (UID: \"adad799a-952a-4ae3-8c37-02707bf01576\") " pod="openstack/heat-2992-account-create-update-lf4d8" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.314363 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57936f2c-e43e-48c1-b59e-cfe1ae0f99b7-combined-ca-bundle\") pod \"keystone-db-sync-9rmnf\" (UID: \"57936f2c-e43e-48c1-b59e-cfe1ae0f99b7\") " pod="openstack/keystone-db-sync-9rmnf" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.314380 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f91b47af-6331-4d04-b4b5-9197d09fd773-operator-scripts\") pod \"barbican-7843-account-create-update-95sgt\" (UID: \"f91b47af-6331-4d04-b4b5-9197d09fd773\") " pod="openstack/barbican-7843-account-create-update-95sgt" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.315411 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/adad799a-952a-4ae3-8c37-02707bf01576-operator-scripts\") pod 
\"heat-2992-account-create-update-lf4d8\" (UID: \"adad799a-952a-4ae3-8c37-02707bf01576\") " pod="openstack/heat-2992-account-create-update-lf4d8" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.318559 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57936f2c-e43e-48c1-b59e-cfe1ae0f99b7-config-data\") pod \"keystone-db-sync-9rmnf\" (UID: \"57936f2c-e43e-48c1-b59e-cfe1ae0f99b7\") " pod="openstack/keystone-db-sync-9rmnf" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.319549 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57936f2c-e43e-48c1-b59e-cfe1ae0f99b7-combined-ca-bundle\") pod \"keystone-db-sync-9rmnf\" (UID: \"57936f2c-e43e-48c1-b59e-cfe1ae0f99b7\") " pod="openstack/keystone-db-sync-9rmnf" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.331895 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-85jg6\" (UniqueName: \"kubernetes.io/projected/57936f2c-e43e-48c1-b59e-cfe1ae0f99b7-kube-api-access-85jg6\") pod \"keystone-db-sync-9rmnf\" (UID: \"57936f2c-e43e-48c1-b59e-cfe1ae0f99b7\") " pod="openstack/keystone-db-sync-9rmnf" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.335671 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bhp64\" (UniqueName: \"kubernetes.io/projected/adad799a-952a-4ae3-8c37-02707bf01576-kube-api-access-bhp64\") pod \"heat-2992-account-create-update-lf4d8\" (UID: \"adad799a-952a-4ae3-8c37-02707bf01576\") " pod="openstack/heat-2992-account-create-update-lf4d8" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.376884 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-cskgf" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.385817 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-ced8-account-create-update-7vkh4" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.416659 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f91b47af-6331-4d04-b4b5-9197d09fd773-operator-scripts\") pod \"barbican-7843-account-create-update-95sgt\" (UID: \"f91b47af-6331-4d04-b4b5-9197d09fd773\") " pod="openstack/barbican-7843-account-create-update-95sgt" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.416822 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4shxf\" (UniqueName: \"kubernetes.io/projected/f91b47af-6331-4d04-b4b5-9197d09fd773-kube-api-access-4shxf\") pod \"barbican-7843-account-create-update-95sgt\" (UID: \"f91b47af-6331-4d04-b4b5-9197d09fd773\") " pod="openstack/barbican-7843-account-create-update-95sgt" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.417429 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f91b47af-6331-4d04-b4b5-9197d09fd773-operator-scripts\") pod \"barbican-7843-account-create-update-95sgt\" (UID: \"f91b47af-6331-4d04-b4b5-9197d09fd773\") " pod="openstack/barbican-7843-account-create-update-95sgt" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.438076 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-2992-account-create-update-lf4d8" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.438828 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-4vhk9"] Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.438978 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4shxf\" (UniqueName: \"kubernetes.io/projected/f91b47af-6331-4d04-b4b5-9197d09fd773-kube-api-access-4shxf\") pod \"barbican-7843-account-create-update-95sgt\" (UID: \"f91b47af-6331-4d04-b4b5-9197d09fd773\") " pod="openstack/barbican-7843-account-create-update-95sgt" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.440147 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-4vhk9" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.443803 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.457128 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-4vhk9"] Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.494558 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-9rmnf" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.519678 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6zn58\" (UniqueName: \"kubernetes.io/projected/007e5d84-b863-42c0-88da-d72362b8f0af-kube-api-access-6zn58\") pod \"root-account-create-update-4vhk9\" (UID: \"007e5d84-b863-42c0-88da-d72362b8f0af\") " pod="openstack/root-account-create-update-4vhk9" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.520037 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/007e5d84-b863-42c0-88da-d72362b8f0af-operator-scripts\") pod \"root-account-create-update-4vhk9\" (UID: \"007e5d84-b863-42c0-88da-d72362b8f0af\") " pod="openstack/root-account-create-update-4vhk9" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.594911 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-7843-account-create-update-95sgt" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.621599 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/007e5d84-b863-42c0-88da-d72362b8f0af-operator-scripts\") pod \"root-account-create-update-4vhk9\" (UID: \"007e5d84-b863-42c0-88da-d72362b8f0af\") " pod="openstack/root-account-create-update-4vhk9" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.621731 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6zn58\" (UniqueName: \"kubernetes.io/projected/007e5d84-b863-42c0-88da-d72362b8f0af-kube-api-access-6zn58\") pod \"root-account-create-update-4vhk9\" (UID: \"007e5d84-b863-42c0-88da-d72362b8f0af\") " pod="openstack/root-account-create-update-4vhk9" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.622384 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/007e5d84-b863-42c0-88da-d72362b8f0af-operator-scripts\") pod \"root-account-create-update-4vhk9\" (UID: \"007e5d84-b863-42c0-88da-d72362b8f0af\") " pod="openstack/root-account-create-update-4vhk9" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.636928 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6zn58\" (UniqueName: \"kubernetes.io/projected/007e5d84-b863-42c0-88da-d72362b8f0af-kube-api-access-6zn58\") pod \"root-account-create-update-4vhk9\" (UID: \"007e5d84-b863-42c0-88da-d72362b8f0af\") " pod="openstack/root-account-create-update-4vhk9" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.682433 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.685499 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.760283 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-4vhk9" Feb 18 00:56:06 crc kubenswrapper[4791]: W0218 00:56:06.839441 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1a56a8ca_c3e8_4cd5_ac83_0a33e50a38ac.slice/crio-da0ba437dd6af6f4ebab5ddb1b48f19027aa576e2d023f67566093559e060e72 WatchSource:0}: Error finding container da0ba437dd6af6f4ebab5ddb1b48f19027aa576e2d023f67566093559e060e72: Status 404 returned error can't find the container with id da0ba437dd6af6f4ebab5ddb1b48f19027aa576e2d023f67566093559e060e72 Feb 18 00:56:06 crc kubenswrapper[4791]: I0218 00:56:06.950335 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-77585f5f8c-96ptc" Feb 18 00:56:07 crc kubenswrapper[4791]: I0218 00:56:07.051204 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-nkd6n"] Feb 18 00:56:07 crc kubenswrapper[4791]: I0218 00:56:07.051797 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-698758b865-nkd6n" podUID="6eb117b6-f49c-4bc7-a59e-50c32713d4a2" containerName="dnsmasq-dns" containerID="cri-o://60217095f344c2c2439620205475f6181ec1deeec7efc211b548c920612e7b2c" gracePeriod=10 Feb 18 00:56:07 crc kubenswrapper[4791]: I0218 00:56:07.449400 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-5shlq-config-klvt9" event={"ID":"1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac","Type":"ContainerStarted","Data":"da0ba437dd6af6f4ebab5ddb1b48f19027aa576e2d023f67566093559e060e72"} Feb 18 00:56:07 crc kubenswrapper[4791]: I0218 00:56:07.454121 4791 generic.go:334] "Generic (PLEG): container finished" podID="6eb117b6-f49c-4bc7-a59e-50c32713d4a2" containerID="60217095f344c2c2439620205475f6181ec1deeec7efc211b548c920612e7b2c" exitCode=0 Feb 18 00:56:07 crc kubenswrapper[4791]: I0218 00:56:07.454188 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-nkd6n" event={"ID":"6eb117b6-f49c-4bc7-a59e-50c32713d4a2","Type":"ContainerDied","Data":"60217095f344c2c2439620205475f6181ec1deeec7efc211b548c920612e7b2c"} Feb 18 00:56:07 crc kubenswrapper[4791]: I0218 00:56:07.457875 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:07 crc kubenswrapper[4791]: I0218 00:56:07.525240 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-9rmnf"] Feb 18 00:56:07 crc kubenswrapper[4791]: I0218 00:56:07.901608 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-7843-account-create-update-95sgt"] Feb 18 00:56:08 crc kubenswrapper[4791]: I0218 00:56:08.230636 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-2992-account-create-update-lf4d8"] Feb 18 00:56:08 crc kubenswrapper[4791]: I0218 00:56:08.290891 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-4vhk9"] Feb 18 00:56:08 crc kubenswrapper[4791]: I0218 00:56:08.350198 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-mhld5"] Feb 18 00:56:08 crc kubenswrapper[4791]: W0218 00:56:08.376589 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5227c08e_63f7_4ec3_b01f_ad54d550ce8e.slice/crio-52eb94759771e3cdb70344a6d0bd1206daa3ad9506df14597ca96a819ab0f0bc WatchSource:0}: 
Error finding container 52eb94759771e3cdb70344a6d0bd1206daa3ad9506df14597ca96a819ab0f0bc: Status 404 returned error can't find the container with id 52eb94759771e3cdb70344a6d0bd1206daa3ad9506df14597ca96a819ab0f0bc Feb 18 00:56:08 crc kubenswrapper[4791]: I0218 00:56:08.383045 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-nkd6n" Feb 18 00:56:08 crc kubenswrapper[4791]: I0218 00:56:08.476882 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-wx5vg" event={"ID":"e48266b0-04c5-4da5-994e-516ef8e36299","Type":"ContainerStarted","Data":"2b619710bf1a07710c75b4fb604764fe25a6dd8f600aba6bd2159c423b54bf79"} Feb 18 00:56:08 crc kubenswrapper[4791]: I0218 00:56:08.478826 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-7843-account-create-update-95sgt" event={"ID":"f91b47af-6331-4d04-b4b5-9197d09fd773","Type":"ContainerStarted","Data":"36b634e49814e36b78a8094cc306e1645e1d190d9b2049876984a9bbdd438e42"} Feb 18 00:56:08 crc kubenswrapper[4791]: I0218 00:56:08.478915 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-7843-account-create-update-95sgt" event={"ID":"f91b47af-6331-4d04-b4b5-9197d09fd773","Type":"ContainerStarted","Data":"38fe48981f8b6cb0f2e6e5611ba42d26c8fa123d54a0bbdc273fb7ba0c407bcf"} Feb 18 00:56:08 crc kubenswrapper[4791]: I0218 00:56:08.487232 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-mhld5" event={"ID":"5227c08e-63f7-4ec3-b01f-ad54d550ce8e","Type":"ContainerStarted","Data":"52eb94759771e3cdb70344a6d0bd1206daa3ad9506df14597ca96a819ab0f0bc"} Feb 18 00:56:08 crc kubenswrapper[4791]: I0218 00:56:08.488661 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-2992-account-create-update-lf4d8" event={"ID":"adad799a-952a-4ae3-8c37-02707bf01576","Type":"ContainerStarted","Data":"f629d310cae601a63164481b85b3a58af8a0206ee089b96195d0a0238c90b1c1"} Feb 18 00:56:08 crc kubenswrapper[4791]: I0218 00:56:08.491422 4791 generic.go:334] "Generic (PLEG): container finished" podID="1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac" containerID="089bc538267743e9c2f9f7ead5b7b40d8d569980c326096728d6791f30f5df0a" exitCode=0 Feb 18 00:56:08 crc kubenswrapper[4791]: I0218 00:56:08.491588 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-5shlq-config-klvt9" event={"ID":"1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac","Type":"ContainerDied","Data":"089bc538267743e9c2f9f7ead5b7b40d8d569980c326096728d6791f30f5df0a"} Feb 18 00:56:08 crc kubenswrapper[4791]: I0218 00:56:08.505680 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xqblt\" (UniqueName: \"kubernetes.io/projected/6eb117b6-f49c-4bc7-a59e-50c32713d4a2-kube-api-access-xqblt\") pod \"6eb117b6-f49c-4bc7-a59e-50c32713d4a2\" (UID: \"6eb117b6-f49c-4bc7-a59e-50c32713d4a2\") " Feb 18 00:56:08 crc kubenswrapper[4791]: I0218 00:56:08.505717 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6eb117b6-f49c-4bc7-a59e-50c32713d4a2-dns-svc\") pod \"6eb117b6-f49c-4bc7-a59e-50c32713d4a2\" (UID: \"6eb117b6-f49c-4bc7-a59e-50c32713d4a2\") " Feb 18 00:56:08 crc kubenswrapper[4791]: I0218 00:56:08.505827 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6eb117b6-f49c-4bc7-a59e-50c32713d4a2-ovsdbserver-nb\") pod 
\"6eb117b6-f49c-4bc7-a59e-50c32713d4a2\" (UID: \"6eb117b6-f49c-4bc7-a59e-50c32713d4a2\") " Feb 18 00:56:08 crc kubenswrapper[4791]: I0218 00:56:08.506046 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6eb117b6-f49c-4bc7-a59e-50c32713d4a2-config\") pod \"6eb117b6-f49c-4bc7-a59e-50c32713d4a2\" (UID: \"6eb117b6-f49c-4bc7-a59e-50c32713d4a2\") " Feb 18 00:56:08 crc kubenswrapper[4791]: I0218 00:56:08.506191 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6eb117b6-f49c-4bc7-a59e-50c32713d4a2-ovsdbserver-sb\") pod \"6eb117b6-f49c-4bc7-a59e-50c32713d4a2\" (UID: \"6eb117b6-f49c-4bc7-a59e-50c32713d4a2\") " Feb 18 00:56:08 crc kubenswrapper[4791]: I0218 00:56:08.513784 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-cskgf"] Feb 18 00:56:08 crc kubenswrapper[4791]: I0218 00:56:08.522261 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-nkd6n" event={"ID":"6eb117b6-f49c-4bc7-a59e-50c32713d4a2","Type":"ContainerDied","Data":"1af09ea4d03cefa8b128cf61829af8bb8857663402f579af2f1959f98794aa42"} Feb 18 00:56:08 crc kubenswrapper[4791]: I0218 00:56:08.522312 4791 scope.go:117] "RemoveContainer" containerID="60217095f344c2c2439620205475f6181ec1deeec7efc211b548c920612e7b2c" Feb 18 00:56:08 crc kubenswrapper[4791]: I0218 00:56:08.522424 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-nkd6n" Feb 18 00:56:08 crc kubenswrapper[4791]: I0218 00:56:08.525681 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-wx5vg" podStartSLOduration=3.870697068 podStartE2EDuration="20.525660935s" podCreationTimestamp="2026-02-18 00:55:48 +0000 UTC" firstStartedPulling="2026-02-18 00:55:50.497696739 +0000 UTC m=+1292.065709909" lastFinishedPulling="2026-02-18 00:56:07.152660606 +0000 UTC m=+1308.720673776" observedRunningTime="2026-02-18 00:56:08.493481997 +0000 UTC m=+1310.061495167" watchObservedRunningTime="2026-02-18 00:56:08.525660935 +0000 UTC m=+1310.093674095" Feb 18 00:56:08 crc kubenswrapper[4791]: I0218 00:56:08.527316 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6eb117b6-f49c-4bc7-a59e-50c32713d4a2-kube-api-access-xqblt" (OuterVolumeSpecName: "kube-api-access-xqblt") pod "6eb117b6-f49c-4bc7-a59e-50c32713d4a2" (UID: "6eb117b6-f49c-4bc7-a59e-50c32713d4a2"). InnerVolumeSpecName "kube-api-access-xqblt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:56:08 crc kubenswrapper[4791]: I0218 00:56:08.545785 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-9rmnf" event={"ID":"57936f2c-e43e-48c1-b59e-cfe1ae0f99b7","Type":"ContainerStarted","Data":"9a60f10eaa257f48a0a2bc763ba901ada196804d94f7a3e7c7d7ffedcb960ca3"} Feb 18 00:56:08 crc kubenswrapper[4791]: I0218 00:56:08.554381 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-4vhk9" event={"ID":"007e5d84-b863-42c0-88da-d72362b8f0af","Type":"ContainerStarted","Data":"37b7fca10da1fabd8f5f8a6af6114f30c10885e043cb186f3c2ff5f4bf5585b4"} Feb 18 00:56:08 crc kubenswrapper[4791]: I0218 00:56:08.598878 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-7843-account-create-update-95sgt" podStartSLOduration=2.598858763 podStartE2EDuration="2.598858763s" podCreationTimestamp="2026-02-18 00:56:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:56:08.513653902 +0000 UTC m=+1310.081667062" watchObservedRunningTime="2026-02-18 00:56:08.598858763 +0000 UTC m=+1310.166871933" Feb 18 00:56:08 crc kubenswrapper[4791]: I0218 00:56:08.600583 4791 scope.go:117] "RemoveContainer" containerID="6bc92c7ea057ed891b0298a2c0398290482d205d5d0939f82641e19c1b0c2f11" Feb 18 00:56:08 crc kubenswrapper[4791]: I0218 00:56:08.622671 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xqblt\" (UniqueName: \"kubernetes.io/projected/6eb117b6-f49c-4bc7-a59e-50c32713d4a2-kube-api-access-xqblt\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:08 crc kubenswrapper[4791]: I0218 00:56:08.679114 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-fdm5n"] Feb 18 00:56:08 crc kubenswrapper[4791]: I0218 00:56:08.701209 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-ced8-account-create-update-7vkh4"] Feb 18 00:56:08 crc kubenswrapper[4791]: I0218 00:56:08.733789 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-kdm9j"] Feb 18 00:56:08 crc kubenswrapper[4791]: I0218 00:56:08.750526 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-d604-account-create-update-jl6cf"] Feb 18 00:56:09 crc kubenswrapper[4791]: I0218 00:56:09.060742 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6eb117b6-f49c-4bc7-a59e-50c32713d4a2-config" (OuterVolumeSpecName: "config") pod "6eb117b6-f49c-4bc7-a59e-50c32713d4a2" (UID: "6eb117b6-f49c-4bc7-a59e-50c32713d4a2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:56:09 crc kubenswrapper[4791]: I0218 00:56:09.079287 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6eb117b6-f49c-4bc7-a59e-50c32713d4a2-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6eb117b6-f49c-4bc7-a59e-50c32713d4a2" (UID: "6eb117b6-f49c-4bc7-a59e-50c32713d4a2"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:56:09 crc kubenswrapper[4791]: I0218 00:56:09.085552 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6eb117b6-f49c-4bc7-a59e-50c32713d4a2-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "6eb117b6-f49c-4bc7-a59e-50c32713d4a2" (UID: "6eb117b6-f49c-4bc7-a59e-50c32713d4a2"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:56:09 crc kubenswrapper[4791]: I0218 00:56:09.089589 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6eb117b6-f49c-4bc7-a59e-50c32713d4a2-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "6eb117b6-f49c-4bc7-a59e-50c32713d4a2" (UID: "6eb117b6-f49c-4bc7-a59e-50c32713d4a2"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:56:09 crc kubenswrapper[4791]: I0218 00:56:09.139263 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6eb117b6-f49c-4bc7-a59e-50c32713d4a2-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:09 crc kubenswrapper[4791]: I0218 00:56:09.139293 4791 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6eb117b6-f49c-4bc7-a59e-50c32713d4a2-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:09 crc kubenswrapper[4791]: I0218 00:56:09.139303 4791 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6eb117b6-f49c-4bc7-a59e-50c32713d4a2-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:09 crc kubenswrapper[4791]: I0218 00:56:09.139311 4791 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6eb117b6-f49c-4bc7-a59e-50c32713d4a2-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:09 crc kubenswrapper[4791]: I0218 00:56:09.453790 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-nkd6n"] Feb 18 00:56:09 crc kubenswrapper[4791]: I0218 00:56:09.456268 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-698758b865-nkd6n"] Feb 18 00:56:09 crc kubenswrapper[4791]: I0218 00:56:09.572204 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-d604-account-create-update-jl6cf" event={"ID":"a54c4f28-a37a-446e-a455-f471bfb7b4fa","Type":"ContainerStarted","Data":"152b9ba180d2c98289160af2d3d5e6cf4bcc68bb5fe9a60b63b4fcfb5989a5ef"} Feb 18 00:56:09 crc kubenswrapper[4791]: I0218 00:56:09.576688 4791 generic.go:334] "Generic (PLEG): container finished" podID="f91b47af-6331-4d04-b4b5-9197d09fd773" containerID="36b634e49814e36b78a8094cc306e1645e1d190d9b2049876984a9bbdd438e42" exitCode=0 Feb 18 00:56:09 crc kubenswrapper[4791]: I0218 00:56:09.576780 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-7843-account-create-update-95sgt" event={"ID":"f91b47af-6331-4d04-b4b5-9197d09fd773","Type":"ContainerDied","Data":"36b634e49814e36b78a8094cc306e1645e1d190d9b2049876984a9bbdd438e42"} Feb 18 00:56:09 crc kubenswrapper[4791]: I0218 00:56:09.579479 4791 generic.go:334] "Generic (PLEG): container finished" podID="5227c08e-63f7-4ec3-b01f-ad54d550ce8e" containerID="9c3db097a70ece3f00e20f8cd8cceaf3f98d5c1afb9298937d78e4fc3cf4d607" exitCode=0 Feb 18 00:56:09 crc kubenswrapper[4791]: I0218 00:56:09.579605 4791 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-mhld5" event={"ID":"5227c08e-63f7-4ec3-b01f-ad54d550ce8e","Type":"ContainerDied","Data":"9c3db097a70ece3f00e20f8cd8cceaf3f98d5c1afb9298937d78e4fc3cf4d607"} Feb 18 00:56:09 crc kubenswrapper[4791]: I0218 00:56:09.625988 4791 generic.go:334] "Generic (PLEG): container finished" podID="adad799a-952a-4ae3-8c37-02707bf01576" containerID="7f2c3a747c18f59bda4800b9f93d7057a4c95b6de155a8aaee1d5b2d030effb4" exitCode=0 Feb 18 00:56:09 crc kubenswrapper[4791]: I0218 00:56:09.626078 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-2992-account-create-update-lf4d8" event={"ID":"adad799a-952a-4ae3-8c37-02707bf01576","Type":"ContainerDied","Data":"7f2c3a747c18f59bda4800b9f93d7057a4c95b6de155a8aaee1d5b2d030effb4"} Feb 18 00:56:09 crc kubenswrapper[4791]: I0218 00:56:09.653053 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-ced8-account-create-update-7vkh4" event={"ID":"f4551c6b-bd8f-49bd-8755-4c1262f74e04","Type":"ContainerStarted","Data":"39226f4aae797bc65c2fffb3d285b953b8f4f50da013e070f4706b9ed09e9ac5"} Feb 18 00:56:09 crc kubenswrapper[4791]: I0218 00:56:09.657387 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-cskgf" event={"ID":"3e122e7c-a512-4608-ae3a-74e528fcfed4","Type":"ContainerStarted","Data":"2872c462053d11c396c61ef9b9f44ddcb96dcbea2a6870d50008540c7bda9f10"} Feb 18 00:56:09 crc kubenswrapper[4791]: I0218 00:56:09.659464 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-fdm5n" event={"ID":"f27aa21f-8528-401f-b41a-090ee07af740","Type":"ContainerStarted","Data":"e87b3d3b12eb0342c5c5b6f6bf1decaeb75087d62ce31462fc7469fd662250f8"} Feb 18 00:56:09 crc kubenswrapper[4791]: I0218 00:56:09.660428 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-kdm9j" event={"ID":"ab28bbf6-b31e-4634-9a35-1e2333b10adc","Type":"ContainerStarted","Data":"7470027f1c26a0ab8ecac3d97dee1314c2359def87b40fd046324beafc6f7453"} Feb 18 00:56:09 crc kubenswrapper[4791]: I0218 00:56:09.666126 4791 generic.go:334] "Generic (PLEG): container finished" podID="007e5d84-b863-42c0-88da-d72362b8f0af" containerID="48d1d8899654959c20dfc366bc088540463b1529389c36a07c0607761d9a5e48" exitCode=0 Feb 18 00:56:09 crc kubenswrapper[4791]: I0218 00:56:09.666251 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-4vhk9" event={"ID":"007e5d84-b863-42c0-88da-d72362b8f0af","Type":"ContainerDied","Data":"48d1d8899654959c20dfc366bc088540463b1529389c36a07c0607761d9a5e48"} Feb 18 00:56:10 crc kubenswrapper[4791]: I0218 00:56:10.324338 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-5shlq-config-klvt9" Feb 18 00:56:10 crc kubenswrapper[4791]: I0218 00:56:10.465585 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac-additional-scripts\") pod \"1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac\" (UID: \"1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac\") " Feb 18 00:56:10 crc kubenswrapper[4791]: I0218 00:56:10.465631 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac-var-run\") pod \"1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac\" (UID: \"1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac\") " Feb 18 00:56:10 crc kubenswrapper[4791]: I0218 00:56:10.465665 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac-var-log-ovn\") pod \"1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac\" (UID: \"1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac\") " Feb 18 00:56:10 crc kubenswrapper[4791]: I0218 00:56:10.465755 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac-scripts\") pod \"1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac\" (UID: \"1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac\") " Feb 18 00:56:10 crc kubenswrapper[4791]: I0218 00:56:10.465796 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6dkzd\" (UniqueName: \"kubernetes.io/projected/1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac-kube-api-access-6dkzd\") pod \"1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac\" (UID: \"1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac\") " Feb 18 00:56:10 crc kubenswrapper[4791]: I0218 00:56:10.465945 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac-var-run-ovn\") pod \"1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac\" (UID: \"1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac\") " Feb 18 00:56:10 crc kubenswrapper[4791]: I0218 00:56:10.466514 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac" (UID: "1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:56:10 crc kubenswrapper[4791]: I0218 00:56:10.467036 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac" (UID: "1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:56:10 crc kubenswrapper[4791]: I0218 00:56:10.467098 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac-var-run" (OuterVolumeSpecName: "var-run") pod "1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac" (UID: "1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:56:10 crc kubenswrapper[4791]: I0218 00:56:10.468036 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac-scripts" (OuterVolumeSpecName: "scripts") pod "1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac" (UID: "1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:56:10 crc kubenswrapper[4791]: I0218 00:56:10.468247 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac" (UID: "1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:56:10 crc kubenswrapper[4791]: I0218 00:56:10.473409 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac-kube-api-access-6dkzd" (OuterVolumeSpecName: "kube-api-access-6dkzd") pod "1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac" (UID: "1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac"). InnerVolumeSpecName "kube-api-access-6dkzd". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:56:10 crc kubenswrapper[4791]: I0218 00:56:10.569223 4791 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac-additional-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:10 crc kubenswrapper[4791]: I0218 00:56:10.569636 4791 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac-var-run\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:10 crc kubenswrapper[4791]: I0218 00:56:10.569650 4791 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac-var-log-ovn\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:10 crc kubenswrapper[4791]: I0218 00:56:10.569660 4791 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:10 crc kubenswrapper[4791]: I0218 00:56:10.569671 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6dkzd\" (UniqueName: \"kubernetes.io/projected/1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac-kube-api-access-6dkzd\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:10 crc kubenswrapper[4791]: I0218 00:56:10.569684 4791 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac-var-run-ovn\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:10 crc kubenswrapper[4791]: I0218 00:56:10.694460 4791 generic.go:334] "Generic (PLEG): container finished" podID="f27aa21f-8528-401f-b41a-090ee07af740" containerID="d20acfdec4fa9356a4d7796a82e8d396d63642783d369df98a343d3fd73d83e0" exitCode=0 Feb 18 00:56:10 crc kubenswrapper[4791]: I0218 00:56:10.694602 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-fdm5n" event={"ID":"f27aa21f-8528-401f-b41a-090ee07af740","Type":"ContainerDied","Data":"d20acfdec4fa9356a4d7796a82e8d396d63642783d369df98a343d3fd73d83e0"} Feb 18 
00:56:10 crc kubenswrapper[4791]: I0218 00:56:10.701166 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-5shlq-config-klvt9" Feb 18 00:56:10 crc kubenswrapper[4791]: I0218 00:56:10.701187 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-5shlq-config-klvt9" event={"ID":"1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac","Type":"ContainerDied","Data":"da0ba437dd6af6f4ebab5ddb1b48f19027aa576e2d023f67566093559e060e72"} Feb 18 00:56:10 crc kubenswrapper[4791]: I0218 00:56:10.701485 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="da0ba437dd6af6f4ebab5ddb1b48f19027aa576e2d023f67566093559e060e72" Feb 18 00:56:10 crc kubenswrapper[4791]: I0218 00:56:10.744471 4791 generic.go:334] "Generic (PLEG): container finished" podID="ab28bbf6-b31e-4634-9a35-1e2333b10adc" containerID="0f45542c0ded31f1475abd3b7bc092d32bb4f794afedf5f1ff3a1f8b75d2c0ca" exitCode=0 Feb 18 00:56:10 crc kubenswrapper[4791]: I0218 00:56:10.744559 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-kdm9j" event={"ID":"ab28bbf6-b31e-4634-9a35-1e2333b10adc","Type":"ContainerDied","Data":"0f45542c0ded31f1475abd3b7bc092d32bb4f794afedf5f1ff3a1f8b75d2c0ca"} Feb 18 00:56:10 crc kubenswrapper[4791]: I0218 00:56:10.764629 4791 generic.go:334] "Generic (PLEG): container finished" podID="f4551c6b-bd8f-49bd-8755-4c1262f74e04" containerID="8194aec0d8a4b7518de3478bfc35b8845308657e96e069ea0ccdd86302f683a1" exitCode=0 Feb 18 00:56:10 crc kubenswrapper[4791]: I0218 00:56:10.764688 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-ced8-account-create-update-7vkh4" event={"ID":"f4551c6b-bd8f-49bd-8755-4c1262f74e04","Type":"ContainerDied","Data":"8194aec0d8a4b7518de3478bfc35b8845308657e96e069ea0ccdd86302f683a1"} Feb 18 00:56:10 crc kubenswrapper[4791]: I0218 00:56:10.827507 4791 generic.go:334] "Generic (PLEG): container finished" podID="a54c4f28-a37a-446e-a455-f471bfb7b4fa" containerID="7e3e54cdb9589ddb430da2d5cdac3c28d586bcf838185f6955cc5d5a1913d8a2" exitCode=0 Feb 18 00:56:10 crc kubenswrapper[4791]: I0218 00:56:10.827591 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-d604-account-create-update-jl6cf" event={"ID":"a54c4f28-a37a-446e-a455-f471bfb7b4fa","Type":"ContainerDied","Data":"7e3e54cdb9589ddb430da2d5cdac3c28d586bcf838185f6955cc5d5a1913d8a2"} Feb 18 00:56:10 crc kubenswrapper[4791]: I0218 00:56:10.838454 4791 generic.go:334] "Generic (PLEG): container finished" podID="3e122e7c-a512-4608-ae3a-74e528fcfed4" containerID="ec178c6773d63da691a8609da2d805f0a70fce192b9b9372a72d1ec60c13c41e" exitCode=0 Feb 18 00:56:10 crc kubenswrapper[4791]: I0218 00:56:10.838947 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-cskgf" event={"ID":"3e122e7c-a512-4608-ae3a-74e528fcfed4","Type":"ContainerDied","Data":"ec178c6773d63da691a8609da2d805f0a70fce192b9b9372a72d1ec60c13c41e"} Feb 18 00:56:10 crc kubenswrapper[4791]: I0218 00:56:10.890288 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Feb 18 00:56:10 crc kubenswrapper[4791]: I0218 00:56:10.890763 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="1c3406dc-1b5d-4376-8b80-b55720c15091" containerName="prometheus" containerID="cri-o://dab91d4e5233a769a0bcb3480c1216b8f00d5ed454db656fad040cef4bc4c5df" gracePeriod=600 Feb 18 00:56:10 crc 
kubenswrapper[4791]: I0218 00:56:10.890835 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="1c3406dc-1b5d-4376-8b80-b55720c15091" containerName="thanos-sidecar" containerID="cri-o://56f1bdcf65bc44f2090c0827ec52a299f6410d974706f5259f3a57cb62968094" gracePeriod=600 Feb 18 00:56:10 crc kubenswrapper[4791]: I0218 00:56:10.890844 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="1c3406dc-1b5d-4376-8b80-b55720c15091" containerName="config-reloader" containerID="cri-o://804a33d2882f7f64057b30535001a5e4734a265390320e6c2f4ec6777045afde" gracePeriod=600 Feb 18 00:56:11 crc kubenswrapper[4791]: I0218 00:56:11.082881 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6eb117b6-f49c-4bc7-a59e-50c32713d4a2" path="/var/lib/kubelet/pods/6eb117b6-f49c-4bc7-a59e-50c32713d4a2/volumes" Feb 18 00:56:11 crc kubenswrapper[4791]: I0218 00:56:11.408309 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-5shlq-config-klvt9"] Feb 18 00:56:11 crc kubenswrapper[4791]: I0218 00:56:11.414276 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-5shlq-config-klvt9"] Feb 18 00:56:11 crc kubenswrapper[4791]: I0218 00:56:11.683313 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/prometheus-metric-storage-0" podUID="1c3406dc-1b5d-4376-8b80-b55720c15091" containerName="prometheus" probeResult="failure" output="Get \"http://10.217.0.139:9090/-/ready\": dial tcp 10.217.0.139:9090: connect: connection refused" Feb 18 00:56:11 crc kubenswrapper[4791]: I0218 00:56:11.859647 4791 generic.go:334] "Generic (PLEG): container finished" podID="1c3406dc-1b5d-4376-8b80-b55720c15091" containerID="56f1bdcf65bc44f2090c0827ec52a299f6410d974706f5259f3a57cb62968094" exitCode=0 Feb 18 00:56:11 crc kubenswrapper[4791]: I0218 00:56:11.859690 4791 generic.go:334] "Generic (PLEG): container finished" podID="1c3406dc-1b5d-4376-8b80-b55720c15091" containerID="804a33d2882f7f64057b30535001a5e4734a265390320e6c2f4ec6777045afde" exitCode=0 Feb 18 00:56:11 crc kubenswrapper[4791]: I0218 00:56:11.859706 4791 generic.go:334] "Generic (PLEG): container finished" podID="1c3406dc-1b5d-4376-8b80-b55720c15091" containerID="dab91d4e5233a769a0bcb3480c1216b8f00d5ed454db656fad040cef4bc4c5df" exitCode=0 Feb 18 00:56:11 crc kubenswrapper[4791]: I0218 00:56:11.859726 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"1c3406dc-1b5d-4376-8b80-b55720c15091","Type":"ContainerDied","Data":"56f1bdcf65bc44f2090c0827ec52a299f6410d974706f5259f3a57cb62968094"} Feb 18 00:56:11 crc kubenswrapper[4791]: I0218 00:56:11.859774 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"1c3406dc-1b5d-4376-8b80-b55720c15091","Type":"ContainerDied","Data":"804a33d2882f7f64057b30535001a5e4734a265390320e6c2f4ec6777045afde"} Feb 18 00:56:11 crc kubenswrapper[4791]: I0218 00:56:11.859796 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"1c3406dc-1b5d-4376-8b80-b55720c15091","Type":"ContainerDied","Data":"dab91d4e5233a769a0bcb3480c1216b8f00d5ed454db656fad040cef4bc4c5df"} Feb 18 00:56:13 crc kubenswrapper[4791]: I0218 00:56:13.079440 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac" 
path="/var/lib/kubelet/pods/1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac/volumes" Feb 18 00:56:13 crc kubenswrapper[4791]: I0218 00:56:13.891961 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-cskgf" event={"ID":"3e122e7c-a512-4608-ae3a-74e528fcfed4","Type":"ContainerDied","Data":"2872c462053d11c396c61ef9b9f44ddcb96dcbea2a6870d50008540c7bda9f10"} Feb 18 00:56:13 crc kubenswrapper[4791]: I0218 00:56:13.892015 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2872c462053d11c396c61ef9b9f44ddcb96dcbea2a6870d50008540c7bda9f10" Feb 18 00:56:13 crc kubenswrapper[4791]: I0218 00:56:13.894278 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-kdm9j" event={"ID":"ab28bbf6-b31e-4634-9a35-1e2333b10adc","Type":"ContainerDied","Data":"7470027f1c26a0ab8ecac3d97dee1314c2359def87b40fd046324beafc6f7453"} Feb 18 00:56:13 crc kubenswrapper[4791]: I0218 00:56:13.894358 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7470027f1c26a0ab8ecac3d97dee1314c2359def87b40fd046324beafc6f7453" Feb 18 00:56:13 crc kubenswrapper[4791]: I0218 00:56:13.895974 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-d604-account-create-update-jl6cf" event={"ID":"a54c4f28-a37a-446e-a455-f471bfb7b4fa","Type":"ContainerDied","Data":"152b9ba180d2c98289160af2d3d5e6cf4bcc68bb5fe9a60b63b4fcfb5989a5ef"} Feb 18 00:56:13 crc kubenswrapper[4791]: I0218 00:56:13.896006 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="152b9ba180d2c98289160af2d3d5e6cf4bcc68bb5fe9a60b63b4fcfb5989a5ef" Feb 18 00:56:13 crc kubenswrapper[4791]: I0218 00:56:13.897944 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-fdm5n" event={"ID":"f27aa21f-8528-401f-b41a-090ee07af740","Type":"ContainerDied","Data":"e87b3d3b12eb0342c5c5b6f6bf1decaeb75087d62ce31462fc7469fd662250f8"} Feb 18 00:56:13 crc kubenswrapper[4791]: I0218 00:56:13.897977 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e87b3d3b12eb0342c5c5b6f6bf1decaeb75087d62ce31462fc7469fd662250f8" Feb 18 00:56:13 crc kubenswrapper[4791]: I0218 00:56:13.903815 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-7843-account-create-update-95sgt" event={"ID":"f91b47af-6331-4d04-b4b5-9197d09fd773","Type":"ContainerDied","Data":"38fe48981f8b6cb0f2e6e5611ba42d26c8fa123d54a0bbdc273fb7ba0c407bcf"} Feb 18 00:56:13 crc kubenswrapper[4791]: I0218 00:56:13.903860 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="38fe48981f8b6cb0f2e6e5611ba42d26c8fa123d54a0bbdc273fb7ba0c407bcf" Feb 18 00:56:13 crc kubenswrapper[4791]: I0218 00:56:13.912957 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-mhld5" event={"ID":"5227c08e-63f7-4ec3-b01f-ad54d550ce8e","Type":"ContainerDied","Data":"52eb94759771e3cdb70344a6d0bd1206daa3ad9506df14597ca96a819ab0f0bc"} Feb 18 00:56:13 crc kubenswrapper[4791]: I0218 00:56:13.913024 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="52eb94759771e3cdb70344a6d0bd1206daa3ad9506df14597ca96a819ab0f0bc" Feb 18 00:56:13 crc kubenswrapper[4791]: I0218 00:56:13.947623 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-2992-account-create-update-lf4d8" 
event={"ID":"adad799a-952a-4ae3-8c37-02707bf01576","Type":"ContainerDied","Data":"f629d310cae601a63164481b85b3a58af8a0206ee089b96195d0a0238c90b1c1"} Feb 18 00:56:13 crc kubenswrapper[4791]: I0218 00:56:13.947663 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f629d310cae601a63164481b85b3a58af8a0206ee089b96195d0a0238c90b1c1" Feb 18 00:56:13 crc kubenswrapper[4791]: I0218 00:56:13.953137 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-ced8-account-create-update-7vkh4" event={"ID":"f4551c6b-bd8f-49bd-8755-4c1262f74e04","Type":"ContainerDied","Data":"39226f4aae797bc65c2fffb3d285b953b8f4f50da013e070f4706b9ed09e9ac5"} Feb 18 00:56:13 crc kubenswrapper[4791]: I0218 00:56:13.953202 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="39226f4aae797bc65c2fffb3d285b953b8f4f50da013e070f4706b9ed09e9ac5" Feb 18 00:56:13 crc kubenswrapper[4791]: I0218 00:56:13.954689 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-4vhk9" event={"ID":"007e5d84-b863-42c0-88da-d72362b8f0af","Type":"ContainerDied","Data":"37b7fca10da1fabd8f5f8a6af6114f30c10885e043cb186f3c2ff5f4bf5585b4"} Feb 18 00:56:13 crc kubenswrapper[4791]: I0218 00:56:13.954715 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="37b7fca10da1fabd8f5f8a6af6114f30c10885e043cb186f3c2ff5f4bf5585b4" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.222129 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-mhld5" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.241660 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-kdm9j" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.247089 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-d604-account-create-update-jl6cf" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.252639 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-ced8-account-create-update-7vkh4" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.304722 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-4vhk9" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.336981 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-fdm5n" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.337149 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-7843-account-create-update-95sgt" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.341849 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-cskgf" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.350872 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-2992-account-create-update-lf4d8" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.386609 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n68vj\" (UniqueName: \"kubernetes.io/projected/a54c4f28-a37a-446e-a455-f471bfb7b4fa-kube-api-access-n68vj\") pod \"a54c4f28-a37a-446e-a455-f471bfb7b4fa\" (UID: \"a54c4f28-a37a-446e-a455-f471bfb7b4fa\") " Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.386721 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ab28bbf6-b31e-4634-9a35-1e2333b10adc-operator-scripts\") pod \"ab28bbf6-b31e-4634-9a35-1e2333b10adc\" (UID: \"ab28bbf6-b31e-4634-9a35-1e2333b10adc\") " Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.386746 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/007e5d84-b863-42c0-88da-d72362b8f0af-operator-scripts\") pod \"007e5d84-b863-42c0-88da-d72362b8f0af\" (UID: \"007e5d84-b863-42c0-88da-d72362b8f0af\") " Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.386776 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5227c08e-63f7-4ec3-b01f-ad54d550ce8e-operator-scripts\") pod \"5227c08e-63f7-4ec3-b01f-ad54d550ce8e\" (UID: \"5227c08e-63f7-4ec3-b01f-ad54d550ce8e\") " Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.386832 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f4551c6b-bd8f-49bd-8755-4c1262f74e04-operator-scripts\") pod \"f4551c6b-bd8f-49bd-8755-4c1262f74e04\" (UID: \"f4551c6b-bd8f-49bd-8755-4c1262f74e04\") " Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.386941 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4tz7p\" (UniqueName: \"kubernetes.io/projected/f4551c6b-bd8f-49bd-8755-4c1262f74e04-kube-api-access-4tz7p\") pod \"f4551c6b-bd8f-49bd-8755-4c1262f74e04\" (UID: \"f4551c6b-bd8f-49bd-8755-4c1262f74e04\") " Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.386976 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zfgpp\" (UniqueName: \"kubernetes.io/projected/ab28bbf6-b31e-4634-9a35-1e2333b10adc-kube-api-access-zfgpp\") pod \"ab28bbf6-b31e-4634-9a35-1e2333b10adc\" (UID: \"ab28bbf6-b31e-4634-9a35-1e2333b10adc\") " Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.387022 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6zn58\" (UniqueName: \"kubernetes.io/projected/007e5d84-b863-42c0-88da-d72362b8f0af-kube-api-access-6zn58\") pod \"007e5d84-b863-42c0-88da-d72362b8f0af\" (UID: \"007e5d84-b863-42c0-88da-d72362b8f0af\") " Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.387052 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9p7lk\" (UniqueName: \"kubernetes.io/projected/5227c08e-63f7-4ec3-b01f-ad54d550ce8e-kube-api-access-9p7lk\") pod \"5227c08e-63f7-4ec3-b01f-ad54d550ce8e\" (UID: \"5227c08e-63f7-4ec3-b01f-ad54d550ce8e\") " Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.387102 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/a54c4f28-a37a-446e-a455-f471bfb7b4fa-operator-scripts\") pod \"a54c4f28-a37a-446e-a455-f471bfb7b4fa\" (UID: \"a54c4f28-a37a-446e-a455-f471bfb7b4fa\") " Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.388180 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.389507 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a54c4f28-a37a-446e-a455-f471bfb7b4fa-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a54c4f28-a37a-446e-a455-f471bfb7b4fa" (UID: "a54c4f28-a37a-446e-a455-f471bfb7b4fa"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.389538 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f4551c6b-bd8f-49bd-8755-4c1262f74e04-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f4551c6b-bd8f-49bd-8755-4c1262f74e04" (UID: "f4551c6b-bd8f-49bd-8755-4c1262f74e04"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.390475 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/007e5d84-b863-42c0-88da-d72362b8f0af-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "007e5d84-b863-42c0-88da-d72362b8f0af" (UID: "007e5d84-b863-42c0-88da-d72362b8f0af"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.390705 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ab28bbf6-b31e-4634-9a35-1e2333b10adc-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ab28bbf6-b31e-4634-9a35-1e2333b10adc" (UID: "ab28bbf6-b31e-4634-9a35-1e2333b10adc"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.394663 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5227c08e-63f7-4ec3-b01f-ad54d550ce8e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5227c08e-63f7-4ec3-b01f-ad54d550ce8e" (UID: "5227c08e-63f7-4ec3-b01f-ad54d550ce8e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.406551 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a54c4f28-a37a-446e-a455-f471bfb7b4fa-kube-api-access-n68vj" (OuterVolumeSpecName: "kube-api-access-n68vj") pod "a54c4f28-a37a-446e-a455-f471bfb7b4fa" (UID: "a54c4f28-a37a-446e-a455-f471bfb7b4fa"). InnerVolumeSpecName "kube-api-access-n68vj". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.407015 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/007e5d84-b863-42c0-88da-d72362b8f0af-kube-api-access-6zn58" (OuterVolumeSpecName: "kube-api-access-6zn58") pod "007e5d84-b863-42c0-88da-d72362b8f0af" (UID: "007e5d84-b863-42c0-88da-d72362b8f0af"). InnerVolumeSpecName "kube-api-access-6zn58". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.407149 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab28bbf6-b31e-4634-9a35-1e2333b10adc-kube-api-access-zfgpp" (OuterVolumeSpecName: "kube-api-access-zfgpp") pod "ab28bbf6-b31e-4634-9a35-1e2333b10adc" (UID: "ab28bbf6-b31e-4634-9a35-1e2333b10adc"). InnerVolumeSpecName "kube-api-access-zfgpp". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.411652 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4551c6b-bd8f-49bd-8755-4c1262f74e04-kube-api-access-4tz7p" (OuterVolumeSpecName: "kube-api-access-4tz7p") pod "f4551c6b-bd8f-49bd-8755-4c1262f74e04" (UID: "f4551c6b-bd8f-49bd-8755-4c1262f74e04"). InnerVolumeSpecName "kube-api-access-4tz7p". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.418361 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5227c08e-63f7-4ec3-b01f-ad54d550ce8e-kube-api-access-9p7lk" (OuterVolumeSpecName: "kube-api-access-9p7lk") pod "5227c08e-63f7-4ec3-b01f-ad54d550ce8e" (UID: "5227c08e-63f7-4ec3-b01f-ad54d550ce8e"). InnerVolumeSpecName "kube-api-access-9p7lk". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.488799 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pfv6t\" (UniqueName: \"kubernetes.io/projected/3e122e7c-a512-4608-ae3a-74e528fcfed4-kube-api-access-pfv6t\") pod \"3e122e7c-a512-4608-ae3a-74e528fcfed4\" (UID: \"3e122e7c-a512-4608-ae3a-74e528fcfed4\") " Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.488847 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/1c3406dc-1b5d-4376-8b80-b55720c15091-prometheus-metric-storage-rulefiles-0\") pod \"1c3406dc-1b5d-4376-8b80-b55720c15091\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.488890 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/1c3406dc-1b5d-4376-8b80-b55720c15091-config-out\") pod \"1c3406dc-1b5d-4376-8b80-b55720c15091\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.488960 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/1c3406dc-1b5d-4376-8b80-b55720c15091-thanos-prometheus-http-client-file\") pod \"1c3406dc-1b5d-4376-8b80-b55720c15091\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.488991 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/adad799a-952a-4ae3-8c37-02707bf01576-operator-scripts\") pod \"adad799a-952a-4ae3-8c37-02707bf01576\" (UID: \"adad799a-952a-4ae3-8c37-02707bf01576\") " Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.489014 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bhp64\" (UniqueName: 
\"kubernetes.io/projected/adad799a-952a-4ae3-8c37-02707bf01576-kube-api-access-bhp64\") pod \"adad799a-952a-4ae3-8c37-02707bf01576\" (UID: \"adad799a-952a-4ae3-8c37-02707bf01576\") " Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.489092 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4shxf\" (UniqueName: \"kubernetes.io/projected/f91b47af-6331-4d04-b4b5-9197d09fd773-kube-api-access-4shxf\") pod \"f91b47af-6331-4d04-b4b5-9197d09fd773\" (UID: \"f91b47af-6331-4d04-b4b5-9197d09fd773\") " Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.489119 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1c3406dc-1b5d-4376-8b80-b55720c15091-config\") pod \"1c3406dc-1b5d-4376-8b80-b55720c15091\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.489142 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f91b47af-6331-4d04-b4b5-9197d09fd773-operator-scripts\") pod \"f91b47af-6331-4d04-b4b5-9197d09fd773\" (UID: \"f91b47af-6331-4d04-b4b5-9197d09fd773\") " Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.489196 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pw8r4\" (UniqueName: \"kubernetes.io/projected/f27aa21f-8528-401f-b41a-090ee07af740-kube-api-access-pw8r4\") pod \"f27aa21f-8528-401f-b41a-090ee07af740\" (UID: \"f27aa21f-8528-401f-b41a-090ee07af740\") " Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.489248 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/1c3406dc-1b5d-4376-8b80-b55720c15091-web-config\") pod \"1c3406dc-1b5d-4376-8b80-b55720c15091\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.489263 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/1c3406dc-1b5d-4376-8b80-b55720c15091-prometheus-metric-storage-rulefiles-2\") pod \"1c3406dc-1b5d-4376-8b80-b55720c15091\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.489280 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3e122e7c-a512-4608-ae3a-74e528fcfed4-operator-scripts\") pod \"3e122e7c-a512-4608-ae3a-74e528fcfed4\" (UID: \"3e122e7c-a512-4608-ae3a-74e528fcfed4\") " Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.489319 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f27aa21f-8528-401f-b41a-090ee07af740-operator-scripts\") pod \"f27aa21f-8528-401f-b41a-090ee07af740\" (UID: \"f27aa21f-8528-401f-b41a-090ee07af740\") " Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.489567 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6ba21e8a-249c-4f45-bb7e-667bfaff725e\") pod \"1c3406dc-1b5d-4376-8b80-b55720c15091\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.489624 4791 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/1c3406dc-1b5d-4376-8b80-b55720c15091-tls-assets\") pod \"1c3406dc-1b5d-4376-8b80-b55720c15091\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.489673 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/1c3406dc-1b5d-4376-8b80-b55720c15091-prometheus-metric-storage-rulefiles-1\") pod \"1c3406dc-1b5d-4376-8b80-b55720c15091\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.489706 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6z67q\" (UniqueName: \"kubernetes.io/projected/1c3406dc-1b5d-4376-8b80-b55720c15091-kube-api-access-6z67q\") pod \"1c3406dc-1b5d-4376-8b80-b55720c15091\" (UID: \"1c3406dc-1b5d-4376-8b80-b55720c15091\") " Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.490222 4791 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f4551c6b-bd8f-49bd-8755-4c1262f74e04-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.490238 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4tz7p\" (UniqueName: \"kubernetes.io/projected/f4551c6b-bd8f-49bd-8755-4c1262f74e04-kube-api-access-4tz7p\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.490250 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zfgpp\" (UniqueName: \"kubernetes.io/projected/ab28bbf6-b31e-4634-9a35-1e2333b10adc-kube-api-access-zfgpp\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.490260 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6zn58\" (UniqueName: \"kubernetes.io/projected/007e5d84-b863-42c0-88da-d72362b8f0af-kube-api-access-6zn58\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.490269 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9p7lk\" (UniqueName: \"kubernetes.io/projected/5227c08e-63f7-4ec3-b01f-ad54d550ce8e-kube-api-access-9p7lk\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.490264 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f91b47af-6331-4d04-b4b5-9197d09fd773-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f91b47af-6331-4d04-b4b5-9197d09fd773" (UID: "f91b47af-6331-4d04-b4b5-9197d09fd773"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.490281 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3e122e7c-a512-4608-ae3a-74e528fcfed4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3e122e7c-a512-4608-ae3a-74e528fcfed4" (UID: "3e122e7c-a512-4608-ae3a-74e528fcfed4"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.490277 4791 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a54c4f28-a37a-446e-a455-f471bfb7b4fa-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.490357 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n68vj\" (UniqueName: \"kubernetes.io/projected/a54c4f28-a37a-446e-a455-f471bfb7b4fa-kube-api-access-n68vj\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.490371 4791 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ab28bbf6-b31e-4634-9a35-1e2333b10adc-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.490383 4791 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/007e5d84-b863-42c0-88da-d72362b8f0af-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.490394 4791 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5227c08e-63f7-4ec3-b01f-ad54d550ce8e-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.491131 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/adad799a-952a-4ae3-8c37-02707bf01576-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "adad799a-952a-4ae3-8c37-02707bf01576" (UID: "adad799a-952a-4ae3-8c37-02707bf01576"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.492127 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c3406dc-1b5d-4376-8b80-b55720c15091-prometheus-metric-storage-rulefiles-2" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-2") pod "1c3406dc-1b5d-4376-8b80-b55720c15091" (UID: "1c3406dc-1b5d-4376-8b80-b55720c15091"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-2". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.493549 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c3406dc-1b5d-4376-8b80-b55720c15091-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "1c3406dc-1b5d-4376-8b80-b55720c15091" (UID: "1c3406dc-1b5d-4376-8b80-b55720c15091"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.494011 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f27aa21f-8528-401f-b41a-090ee07af740-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f27aa21f-8528-401f-b41a-090ee07af740" (UID: "f27aa21f-8528-401f-b41a-090ee07af740"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.494349 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c3406dc-1b5d-4376-8b80-b55720c15091-prometheus-metric-storage-rulefiles-1" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-1") pod "1c3406dc-1b5d-4376-8b80-b55720c15091" (UID: "1c3406dc-1b5d-4376-8b80-b55720c15091"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-1". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.496777 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f27aa21f-8528-401f-b41a-090ee07af740-kube-api-access-pw8r4" (OuterVolumeSpecName: "kube-api-access-pw8r4") pod "f27aa21f-8528-401f-b41a-090ee07af740" (UID: "f27aa21f-8528-401f-b41a-090ee07af740"). InnerVolumeSpecName "kube-api-access-pw8r4". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.497338 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/adad799a-952a-4ae3-8c37-02707bf01576-kube-api-access-bhp64" (OuterVolumeSpecName: "kube-api-access-bhp64") pod "adad799a-952a-4ae3-8c37-02707bf01576" (UID: "adad799a-952a-4ae3-8c37-02707bf01576"). InnerVolumeSpecName "kube-api-access-bhp64". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.497435 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3e122e7c-a512-4608-ae3a-74e528fcfed4-kube-api-access-pfv6t" (OuterVolumeSpecName: "kube-api-access-pfv6t") pod "3e122e7c-a512-4608-ae3a-74e528fcfed4" (UID: "3e122e7c-a512-4608-ae3a-74e528fcfed4"). InnerVolumeSpecName "kube-api-access-pfv6t". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.499251 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1c3406dc-1b5d-4376-8b80-b55720c15091-config-out" (OuterVolumeSpecName: "config-out") pod "1c3406dc-1b5d-4376-8b80-b55720c15091" (UID: "1c3406dc-1b5d-4376-8b80-b55720c15091"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.499605 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c3406dc-1b5d-4376-8b80-b55720c15091-kube-api-access-6z67q" (OuterVolumeSpecName: "kube-api-access-6z67q") pod "1c3406dc-1b5d-4376-8b80-b55720c15091" (UID: "1c3406dc-1b5d-4376-8b80-b55720c15091"). InnerVolumeSpecName "kube-api-access-6z67q". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.499665 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c3406dc-1b5d-4376-8b80-b55720c15091-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "1c3406dc-1b5d-4376-8b80-b55720c15091" (UID: "1c3406dc-1b5d-4376-8b80-b55720c15091"). InnerVolumeSpecName "tls-assets". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.500297 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f91b47af-6331-4d04-b4b5-9197d09fd773-kube-api-access-4shxf" (OuterVolumeSpecName: "kube-api-access-4shxf") pod "f91b47af-6331-4d04-b4b5-9197d09fd773" (UID: "f91b47af-6331-4d04-b4b5-9197d09fd773"). InnerVolumeSpecName "kube-api-access-4shxf". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.502368 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c3406dc-1b5d-4376-8b80-b55720c15091-config" (OuterVolumeSpecName: "config") pod "1c3406dc-1b5d-4376-8b80-b55720c15091" (UID: "1c3406dc-1b5d-4376-8b80-b55720c15091"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.502552 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c3406dc-1b5d-4376-8b80-b55720c15091-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "1c3406dc-1b5d-4376-8b80-b55720c15091" (UID: "1c3406dc-1b5d-4376-8b80-b55720c15091"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.542702 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c3406dc-1b5d-4376-8b80-b55720c15091-web-config" (OuterVolumeSpecName: "web-config") pod "1c3406dc-1b5d-4376-8b80-b55720c15091" (UID: "1c3406dc-1b5d-4376-8b80-b55720c15091"). InnerVolumeSpecName "web-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.544381 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6ba21e8a-249c-4f45-bb7e-667bfaff725e" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "1c3406dc-1b5d-4376-8b80-b55720c15091" (UID: "1c3406dc-1b5d-4376-8b80-b55720c15091"). InnerVolumeSpecName "pvc-6ba21e8a-249c-4f45-bb7e-667bfaff725e". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.592020 4791 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/1c3406dc-1b5d-4376-8b80-b55720c15091-web-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.592057 4791 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/1c3406dc-1b5d-4376-8b80-b55720c15091-prometheus-metric-storage-rulefiles-2\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.592070 4791 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3e122e7c-a512-4608-ae3a-74e528fcfed4-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.592080 4791 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f27aa21f-8528-401f-b41a-090ee07af740-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.592115 4791 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-6ba21e8a-249c-4f45-bb7e-667bfaff725e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6ba21e8a-249c-4f45-bb7e-667bfaff725e\") on node \"crc\" " Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.592126 4791 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/1c3406dc-1b5d-4376-8b80-b55720c15091-tls-assets\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.592137 4791 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/1c3406dc-1b5d-4376-8b80-b55720c15091-prometheus-metric-storage-rulefiles-1\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.592147 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6z67q\" (UniqueName: \"kubernetes.io/projected/1c3406dc-1b5d-4376-8b80-b55720c15091-kube-api-access-6z67q\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.592172 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pfv6t\" (UniqueName: \"kubernetes.io/projected/3e122e7c-a512-4608-ae3a-74e528fcfed4-kube-api-access-pfv6t\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.592181 4791 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/1c3406dc-1b5d-4376-8b80-b55720c15091-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.592193 4791 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/1c3406dc-1b5d-4376-8b80-b55720c15091-config-out\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.592203 4791 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/1c3406dc-1b5d-4376-8b80-b55720c15091-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 
00:56:14.592211 4791 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/adad799a-952a-4ae3-8c37-02707bf01576-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.592220 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bhp64\" (UniqueName: \"kubernetes.io/projected/adad799a-952a-4ae3-8c37-02707bf01576-kube-api-access-bhp64\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.592229 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4shxf\" (UniqueName: \"kubernetes.io/projected/f91b47af-6331-4d04-b4b5-9197d09fd773-kube-api-access-4shxf\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.592238 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/1c3406dc-1b5d-4376-8b80-b55720c15091-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.592247 4791 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f91b47af-6331-4d04-b4b5-9197d09fd773-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.592255 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pw8r4\" (UniqueName: \"kubernetes.io/projected/f27aa21f-8528-401f-b41a-090ee07af740-kube-api-access-pw8r4\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.625475 4791 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.625652 4791 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-6ba21e8a-249c-4f45-bb7e-667bfaff725e" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6ba21e8a-249c-4f45-bb7e-667bfaff725e") on node "crc" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.693774 4791 reconciler_common.go:293] "Volume detached for volume \"pvc-6ba21e8a-249c-4f45-bb7e-667bfaff725e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6ba21e8a-249c-4f45-bb7e-667bfaff725e\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.965864 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-9rmnf" event={"ID":"57936f2c-e43e-48c1-b59e-cfe1ae0f99b7","Type":"ContainerStarted","Data":"2025b9012e9d047e209e35ef2efbeb64906a7db1080dcc5f50330f6594da1a61"} Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.968924 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-d604-account-create-update-jl6cf" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.968960 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-4vhk9" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.968991 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"1c3406dc-1b5d-4376-8b80-b55720c15091","Type":"ContainerDied","Data":"46e14f14047bfb29055b5f1098d6e1e2aa7eceeefdacd4817e070435f78cd029"} Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.969029 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-ced8-account-create-update-7vkh4" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.969053 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-cskgf" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.969009 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-7843-account-create-update-95sgt" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.969106 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-mhld5" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.969098 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-kdm9j" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.969139 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.969098 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-fdm5n" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.969105 4791 scope.go:117] "RemoveContainer" containerID="56f1bdcf65bc44f2090c0827ec52a299f6410d974706f5259f3a57cb62968094" Feb 18 00:56:14 crc kubenswrapper[4791]: I0218 00:56:14.969251 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-2992-account-create-update-lf4d8" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.008584 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-9rmnf" podStartSLOduration=2.674189708 podStartE2EDuration="9.008547265s" podCreationTimestamp="2026-02-18 00:56:06 +0000 UTC" firstStartedPulling="2026-02-18 00:56:07.555004195 +0000 UTC m=+1309.123017365" lastFinishedPulling="2026-02-18 00:56:13.889361752 +0000 UTC m=+1315.457374922" observedRunningTime="2026-02-18 00:56:14.996739599 +0000 UTC m=+1316.564752759" watchObservedRunningTime="2026-02-18 00:56:15.008547265 +0000 UTC m=+1316.576560425" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.013566 4791 scope.go:117] "RemoveContainer" containerID="804a33d2882f7f64057b30535001a5e4734a265390320e6c2f4ec6777045afde" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.061911 4791 scope.go:117] "RemoveContainer" containerID="dab91d4e5233a769a0bcb3480c1216b8f00d5ed454db656fad040cef4bc4c5df" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.099787 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.120335 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"] Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.120522 4791 scope.go:117] "RemoveContainer" containerID="765528daa15c40187620b57eeeec366bd55c2183900753d58ea1126db14d6915" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.137390 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Feb 18 00:56:15 crc kubenswrapper[4791]: E0218 00:56:15.143495 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a54c4f28-a37a-446e-a455-f471bfb7b4fa" containerName="mariadb-account-create-update" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.143530 4791 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="a54c4f28-a37a-446e-a455-f471bfb7b4fa" containerName="mariadb-account-create-update" Feb 18 00:56:15 crc kubenswrapper[4791]: E0218 00:56:15.143547 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c3406dc-1b5d-4376-8b80-b55720c15091" containerName="config-reloader" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.143554 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c3406dc-1b5d-4376-8b80-b55720c15091" containerName="config-reloader" Feb 18 00:56:15 crc kubenswrapper[4791]: E0218 00:56:15.143576 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e122e7c-a512-4608-ae3a-74e528fcfed4" containerName="mariadb-database-create" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.143583 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e122e7c-a512-4608-ae3a-74e528fcfed4" containerName="mariadb-database-create" Feb 18 00:56:15 crc kubenswrapper[4791]: E0218 00:56:15.143604 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6eb117b6-f49c-4bc7-a59e-50c32713d4a2" containerName="dnsmasq-dns" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.143611 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="6eb117b6-f49c-4bc7-a59e-50c32713d4a2" containerName="dnsmasq-dns" Feb 18 00:56:15 crc kubenswrapper[4791]: E0218 00:56:15.143663 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6eb117b6-f49c-4bc7-a59e-50c32713d4a2" containerName="init" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.143670 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="6eb117b6-f49c-4bc7-a59e-50c32713d4a2" containerName="init" Feb 18 00:56:15 crc kubenswrapper[4791]: E0218 00:56:15.143691 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="adad799a-952a-4ae3-8c37-02707bf01576" containerName="mariadb-account-create-update" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.143698 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="adad799a-952a-4ae3-8c37-02707bf01576" containerName="mariadb-account-create-update" Feb 18 00:56:15 crc kubenswrapper[4791]: E0218 00:56:15.143706 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c3406dc-1b5d-4376-8b80-b55720c15091" containerName="prometheus" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.143712 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c3406dc-1b5d-4376-8b80-b55720c15091" containerName="prometheus" Feb 18 00:56:15 crc kubenswrapper[4791]: E0218 00:56:15.143734 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4551c6b-bd8f-49bd-8755-4c1262f74e04" containerName="mariadb-account-create-update" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.143742 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4551c6b-bd8f-49bd-8755-4c1262f74e04" containerName="mariadb-account-create-update" Feb 18 00:56:15 crc kubenswrapper[4791]: E0218 00:56:15.143764 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c3406dc-1b5d-4376-8b80-b55720c15091" containerName="init-config-reloader" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.143771 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c3406dc-1b5d-4376-8b80-b55720c15091" containerName="init-config-reloader" Feb 18 00:56:15 crc kubenswrapper[4791]: E0218 00:56:15.143784 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c3406dc-1b5d-4376-8b80-b55720c15091" containerName="thanos-sidecar" Feb 18 00:56:15 crc 
kubenswrapper[4791]: I0218 00:56:15.143790 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c3406dc-1b5d-4376-8b80-b55720c15091" containerName="thanos-sidecar" Feb 18 00:56:15 crc kubenswrapper[4791]: E0218 00:56:15.143807 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="007e5d84-b863-42c0-88da-d72362b8f0af" containerName="mariadb-account-create-update" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.143813 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="007e5d84-b863-42c0-88da-d72362b8f0af" containerName="mariadb-account-create-update" Feb 18 00:56:15 crc kubenswrapper[4791]: E0218 00:56:15.143833 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab28bbf6-b31e-4634-9a35-1e2333b10adc" containerName="mariadb-database-create" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.143841 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab28bbf6-b31e-4634-9a35-1e2333b10adc" containerName="mariadb-database-create" Feb 18 00:56:15 crc kubenswrapper[4791]: E0218 00:56:15.143853 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac" containerName="ovn-config" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.143859 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac" containerName="ovn-config" Feb 18 00:56:15 crc kubenswrapper[4791]: E0218 00:56:15.143877 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f91b47af-6331-4d04-b4b5-9197d09fd773" containerName="mariadb-account-create-update" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.143883 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="f91b47af-6331-4d04-b4b5-9197d09fd773" containerName="mariadb-account-create-update" Feb 18 00:56:15 crc kubenswrapper[4791]: E0218 00:56:15.143894 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f27aa21f-8528-401f-b41a-090ee07af740" containerName="mariadb-database-create" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.143900 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="f27aa21f-8528-401f-b41a-090ee07af740" containerName="mariadb-database-create" Feb 18 00:56:15 crc kubenswrapper[4791]: E0218 00:56:15.143915 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5227c08e-63f7-4ec3-b01f-ad54d550ce8e" containerName="mariadb-database-create" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.143922 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="5227c08e-63f7-4ec3-b01f-ad54d550ce8e" containerName="mariadb-database-create" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.144507 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="6eb117b6-f49c-4bc7-a59e-50c32713d4a2" containerName="dnsmasq-dns" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.144533 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c3406dc-1b5d-4376-8b80-b55720c15091" containerName="thanos-sidecar" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.144547 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="a54c4f28-a37a-446e-a455-f471bfb7b4fa" containerName="mariadb-account-create-update" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.144556 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e122e7c-a512-4608-ae3a-74e528fcfed4" containerName="mariadb-database-create" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.144578 4791 
memory_manager.go:354] "RemoveStaleState removing state" podUID="007e5d84-b863-42c0-88da-d72362b8f0af" containerName="mariadb-account-create-update" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.144599 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="5227c08e-63f7-4ec3-b01f-ad54d550ce8e" containerName="mariadb-database-create" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.144613 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4551c6b-bd8f-49bd-8755-4c1262f74e04" containerName="mariadb-account-create-update" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.144629 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c3406dc-1b5d-4376-8b80-b55720c15091" containerName="prometheus" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.144643 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="adad799a-952a-4ae3-8c37-02707bf01576" containerName="mariadb-account-create-update" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.144665 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a56a8ca-c3e8-4cd5-ac83-0a33e50a38ac" containerName="ovn-config" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.144679 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="f91b47af-6331-4d04-b4b5-9197d09fd773" containerName="mariadb-account-create-update" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.144691 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="f27aa21f-8528-401f-b41a-090ee07af740" containerName="mariadb-database-create" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.144704 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c3406dc-1b5d-4376-8b80-b55720c15091" containerName="config-reloader" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.144715 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab28bbf6-b31e-4634-9a35-1e2333b10adc" containerName="mariadb-database-create" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.179995 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.180221 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.182482 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.183494 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-g9qqq" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.183570 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-2" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.183706 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.183852 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.183915 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.184048 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.184114 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-1" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.192221 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.306353 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/6658e3ff-2b3b-476c-8638-a5b3d94005d4-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.306413 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/6658e3ff-2b3b-476c-8638-a5b3d94005d4-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.306451 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-6ba21e8a-249c-4f45-bb7e-667bfaff725e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6ba21e8a-249c-4f45-bb7e-667bfaff725e\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.306574 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/6658e3ff-2b3b-476c-8638-a5b3d94005d4-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.306638 4791 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6658e3ff-2b3b-476c-8638-a5b3d94005d4-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.306687 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/6658e3ff-2b3b-476c-8638-a5b3d94005d4-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.306760 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/6658e3ff-2b3b-476c-8638-a5b3d94005d4-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.306802 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dm9r7\" (UniqueName: \"kubernetes.io/projected/6658e3ff-2b3b-476c-8638-a5b3d94005d4-kube-api-access-dm9r7\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.306846 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/6658e3ff-2b3b-476c-8638-a5b3d94005d4-config\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.307059 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/6658e3ff-2b3b-476c-8638-a5b3d94005d4-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.307129 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/6658e3ff-2b3b-476c-8638-a5b3d94005d4-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.307196 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/6658e3ff-2b3b-476c-8638-a5b3d94005d4-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.307303 4791 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/6658e3ff-2b3b-476c-8638-a5b3d94005d4-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.410623 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/6658e3ff-2b3b-476c-8638-a5b3d94005d4-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.410682 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/6658e3ff-2b3b-476c-8638-a5b3d94005d4-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.410713 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/6658e3ff-2b3b-476c-8638-a5b3d94005d4-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.410733 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-6ba21e8a-249c-4f45-bb7e-667bfaff725e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6ba21e8a-249c-4f45-bb7e-667bfaff725e\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.410758 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/6658e3ff-2b3b-476c-8638-a5b3d94005d4-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.410776 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6658e3ff-2b3b-476c-8638-a5b3d94005d4-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.410797 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/6658e3ff-2b3b-476c-8638-a5b3d94005d4-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.410821 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: 
\"kubernetes.io/configmap/6658e3ff-2b3b-476c-8638-a5b3d94005d4-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.410841 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dm9r7\" (UniqueName: \"kubernetes.io/projected/6658e3ff-2b3b-476c-8638-a5b3d94005d4-kube-api-access-dm9r7\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.410875 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/6658e3ff-2b3b-476c-8638-a5b3d94005d4-config\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.410939 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/6658e3ff-2b3b-476c-8638-a5b3d94005d4-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.410970 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/6658e3ff-2b3b-476c-8638-a5b3d94005d4-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.410989 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/6658e3ff-2b3b-476c-8638-a5b3d94005d4-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.411566 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/6658e3ff-2b3b-476c-8638-a5b3d94005d4-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.411606 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/6658e3ff-2b3b-476c-8638-a5b3d94005d4-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.412214 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/6658e3ff-2b3b-476c-8638-a5b3d94005d4-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " 
pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.417692 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/6658e3ff-2b3b-476c-8638-a5b3d94005d4-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.418377 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/6658e3ff-2b3b-476c-8638-a5b3d94005d4-config\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.421914 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/6658e3ff-2b3b-476c-8638-a5b3d94005d4-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.424948 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/6658e3ff-2b3b-476c-8638-a5b3d94005d4-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.425009 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/6658e3ff-2b3b-476c-8638-a5b3d94005d4-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.425821 4791 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.425850 4791 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-6ba21e8a-249c-4f45-bb7e-667bfaff725e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6ba21e8a-249c-4f45-bb7e-667bfaff725e\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/08956609b52980c426bf22e33256fb47c97baf3f2e3c40e37bcbe84538d50090/globalmount\"" pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.438966 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6658e3ff-2b3b-476c-8638-a5b3d94005d4-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.442633 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/6658e3ff-2b3b-476c-8638-a5b3d94005d4-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.455659 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dm9r7\" (UniqueName: \"kubernetes.io/projected/6658e3ff-2b3b-476c-8638-a5b3d94005d4-kube-api-access-dm9r7\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.457531 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/6658e3ff-2b3b-476c-8638-a5b3d94005d4-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.512107 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-6ba21e8a-249c-4f45-bb7e-667bfaff725e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6ba21e8a-249c-4f45-bb7e-667bfaff725e\") pod \"prometheus-metric-storage-0\" (UID: \"6658e3ff-2b3b-476c-8638-a5b3d94005d4\") " pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:15 crc kubenswrapper[4791]: I0218 00:56:15.802281 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Feb 18 00:56:16 crc kubenswrapper[4791]: I0218 00:56:16.321098 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Feb 18 00:56:16 crc kubenswrapper[4791]: W0218 00:56:16.321405 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6658e3ff_2b3b_476c_8638_a5b3d94005d4.slice/crio-b3984d27865480b0bf20fe83e053698caf13748064191ebd3bc976628243b0cc WatchSource:0}: Error finding container b3984d27865480b0bf20fe83e053698caf13748064191ebd3bc976628243b0cc: Status 404 returned error can't find the container with id b3984d27865480b0bf20fe83e053698caf13748064191ebd3bc976628243b0cc Feb 18 00:56:16 crc kubenswrapper[4791]: I0218 00:56:16.996970 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"6658e3ff-2b3b-476c-8638-a5b3d94005d4","Type":"ContainerStarted","Data":"b3984d27865480b0bf20fe83e053698caf13748064191ebd3bc976628243b0cc"} Feb 18 00:56:17 crc kubenswrapper[4791]: I0218 00:56:17.074545 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c3406dc-1b5d-4376-8b80-b55720c15091" path="/var/lib/kubelet/pods/1c3406dc-1b5d-4376-8b80-b55720c15091/volumes" Feb 18 00:56:20 crc kubenswrapper[4791]: I0218 00:56:20.027121 4791 generic.go:334] "Generic (PLEG): container finished" podID="57936f2c-e43e-48c1-b59e-cfe1ae0f99b7" containerID="2025b9012e9d047e209e35ef2efbeb64906a7db1080dcc5f50330f6594da1a61" exitCode=0 Feb 18 00:56:20 crc kubenswrapper[4791]: I0218 00:56:20.027193 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-9rmnf" event={"ID":"57936f2c-e43e-48c1-b59e-cfe1ae0f99b7","Type":"ContainerDied","Data":"2025b9012e9d047e209e35ef2efbeb64906a7db1080dcc5f50330f6594da1a61"} Feb 18 00:56:20 crc kubenswrapper[4791]: I0218 00:56:20.029917 4791 generic.go:334] "Generic (PLEG): container finished" podID="e48266b0-04c5-4da5-994e-516ef8e36299" containerID="2b619710bf1a07710c75b4fb604764fe25a6dd8f600aba6bd2159c423b54bf79" exitCode=0 Feb 18 00:56:20 crc kubenswrapper[4791]: I0218 00:56:20.029944 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-wx5vg" event={"ID":"e48266b0-04c5-4da5-994e-516ef8e36299","Type":"ContainerDied","Data":"2b619710bf1a07710c75b4fb604764fe25a6dd8f600aba6bd2159c423b54bf79"} Feb 18 00:56:20 crc kubenswrapper[4791]: I0218 00:56:20.031748 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"6658e3ff-2b3b-476c-8638-a5b3d94005d4","Type":"ContainerStarted","Data":"10638f1ad38af4c73e52f31fd2e057ff8f2a23ea8fe0b0bb97f9f1ce96579515"} Feb 18 00:56:21 crc kubenswrapper[4791]: I0218 00:56:21.457274 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-9rmnf" Feb 18 00:56:21 crc kubenswrapper[4791]: I0218 00:56:21.562853 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-wx5vg" Feb 18 00:56:21 crc kubenswrapper[4791]: I0218 00:56:21.581399 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-85jg6\" (UniqueName: \"kubernetes.io/projected/57936f2c-e43e-48c1-b59e-cfe1ae0f99b7-kube-api-access-85jg6\") pod \"57936f2c-e43e-48c1-b59e-cfe1ae0f99b7\" (UID: \"57936f2c-e43e-48c1-b59e-cfe1ae0f99b7\") " Feb 18 00:56:21 crc kubenswrapper[4791]: I0218 00:56:21.581633 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57936f2c-e43e-48c1-b59e-cfe1ae0f99b7-combined-ca-bundle\") pod \"57936f2c-e43e-48c1-b59e-cfe1ae0f99b7\" (UID: \"57936f2c-e43e-48c1-b59e-cfe1ae0f99b7\") " Feb 18 00:56:21 crc kubenswrapper[4791]: I0218 00:56:21.581707 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57936f2c-e43e-48c1-b59e-cfe1ae0f99b7-config-data\") pod \"57936f2c-e43e-48c1-b59e-cfe1ae0f99b7\" (UID: \"57936f2c-e43e-48c1-b59e-cfe1ae0f99b7\") " Feb 18 00:56:21 crc kubenswrapper[4791]: I0218 00:56:21.590583 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57936f2c-e43e-48c1-b59e-cfe1ae0f99b7-kube-api-access-85jg6" (OuterVolumeSpecName: "kube-api-access-85jg6") pod "57936f2c-e43e-48c1-b59e-cfe1ae0f99b7" (UID: "57936f2c-e43e-48c1-b59e-cfe1ae0f99b7"). InnerVolumeSpecName "kube-api-access-85jg6". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:56:21 crc kubenswrapper[4791]: I0218 00:56:21.623134 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57936f2c-e43e-48c1-b59e-cfe1ae0f99b7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "57936f2c-e43e-48c1-b59e-cfe1ae0f99b7" (UID: "57936f2c-e43e-48c1-b59e-cfe1ae0f99b7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:56:21 crc kubenswrapper[4791]: I0218 00:56:21.642921 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57936f2c-e43e-48c1-b59e-cfe1ae0f99b7-config-data" (OuterVolumeSpecName: "config-data") pod "57936f2c-e43e-48c1-b59e-cfe1ae0f99b7" (UID: "57936f2c-e43e-48c1-b59e-cfe1ae0f99b7"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:56:21 crc kubenswrapper[4791]: I0218 00:56:21.682943 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e48266b0-04c5-4da5-994e-516ef8e36299-db-sync-config-data\") pod \"e48266b0-04c5-4da5-994e-516ef8e36299\" (UID: \"e48266b0-04c5-4da5-994e-516ef8e36299\") " Feb 18 00:56:21 crc kubenswrapper[4791]: I0218 00:56:21.683095 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e48266b0-04c5-4da5-994e-516ef8e36299-combined-ca-bundle\") pod \"e48266b0-04c5-4da5-994e-516ef8e36299\" (UID: \"e48266b0-04c5-4da5-994e-516ef8e36299\") " Feb 18 00:56:21 crc kubenswrapper[4791]: I0218 00:56:21.683138 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e48266b0-04c5-4da5-994e-516ef8e36299-config-data\") pod \"e48266b0-04c5-4da5-994e-516ef8e36299\" (UID: \"e48266b0-04c5-4da5-994e-516ef8e36299\") " Feb 18 00:56:21 crc kubenswrapper[4791]: I0218 00:56:21.683354 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f4fk4\" (UniqueName: \"kubernetes.io/projected/e48266b0-04c5-4da5-994e-516ef8e36299-kube-api-access-f4fk4\") pod \"e48266b0-04c5-4da5-994e-516ef8e36299\" (UID: \"e48266b0-04c5-4da5-994e-516ef8e36299\") " Feb 18 00:56:21 crc kubenswrapper[4791]: I0218 00:56:21.683812 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-85jg6\" (UniqueName: \"kubernetes.io/projected/57936f2c-e43e-48c1-b59e-cfe1ae0f99b7-kube-api-access-85jg6\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:21 crc kubenswrapper[4791]: I0218 00:56:21.683833 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57936f2c-e43e-48c1-b59e-cfe1ae0f99b7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:21 crc kubenswrapper[4791]: I0218 00:56:21.683844 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57936f2c-e43e-48c1-b59e-cfe1ae0f99b7-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:21 crc kubenswrapper[4791]: I0218 00:56:21.685922 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e48266b0-04c5-4da5-994e-516ef8e36299-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "e48266b0-04c5-4da5-994e-516ef8e36299" (UID: "e48266b0-04c5-4da5-994e-516ef8e36299"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:56:21 crc kubenswrapper[4791]: I0218 00:56:21.686350 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e48266b0-04c5-4da5-994e-516ef8e36299-kube-api-access-f4fk4" (OuterVolumeSpecName: "kube-api-access-f4fk4") pod "e48266b0-04c5-4da5-994e-516ef8e36299" (UID: "e48266b0-04c5-4da5-994e-516ef8e36299"). InnerVolumeSpecName "kube-api-access-f4fk4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:56:21 crc kubenswrapper[4791]: I0218 00:56:21.717237 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e48266b0-04c5-4da5-994e-516ef8e36299-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e48266b0-04c5-4da5-994e-516ef8e36299" (UID: "e48266b0-04c5-4da5-994e-516ef8e36299"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:56:21 crc kubenswrapper[4791]: I0218 00:56:21.755462 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e48266b0-04c5-4da5-994e-516ef8e36299-config-data" (OuterVolumeSpecName: "config-data") pod "e48266b0-04c5-4da5-994e-516ef8e36299" (UID: "e48266b0-04c5-4da5-994e-516ef8e36299"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:56:21 crc kubenswrapper[4791]: I0218 00:56:21.785458 4791 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e48266b0-04c5-4da5-994e-516ef8e36299-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:21 crc kubenswrapper[4791]: I0218 00:56:21.785494 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e48266b0-04c5-4da5-994e-516ef8e36299-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:21 crc kubenswrapper[4791]: I0218 00:56:21.785503 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e48266b0-04c5-4da5-994e-516ef8e36299-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:21 crc kubenswrapper[4791]: I0218 00:56:21.785512 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f4fk4\" (UniqueName: \"kubernetes.io/projected/e48266b0-04c5-4da5-994e-516ef8e36299-kube-api-access-f4fk4\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.053965 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-wx5vg" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.053953 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-wx5vg" event={"ID":"e48266b0-04c5-4da5-994e-516ef8e36299","Type":"ContainerDied","Data":"297d88e0754c0d95fd8511353f36084ef2576c3c07b4726ddab6e84af660d35b"} Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.054026 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="297d88e0754c0d95fd8511353f36084ef2576c3c07b4726ddab6e84af660d35b" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.056109 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-9rmnf" event={"ID":"57936f2c-e43e-48c1-b59e-cfe1ae0f99b7","Type":"ContainerDied","Data":"9a60f10eaa257f48a0a2bc763ba901ada196804d94f7a3e7c7d7ffedcb960ca3"} Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.056137 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9a60f10eaa257f48a0a2bc763ba901ada196804d94f7a3e7c7d7ffedcb960ca3" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.056388 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-9rmnf" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.269330 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-55fff446b9-2xk8z"] Feb 18 00:56:22 crc kubenswrapper[4791]: E0218 00:56:22.269742 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e48266b0-04c5-4da5-994e-516ef8e36299" containerName="glance-db-sync" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.269759 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="e48266b0-04c5-4da5-994e-516ef8e36299" containerName="glance-db-sync" Feb 18 00:56:22 crc kubenswrapper[4791]: E0218 00:56:22.269781 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57936f2c-e43e-48c1-b59e-cfe1ae0f99b7" containerName="keystone-db-sync" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.269788 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="57936f2c-e43e-48c1-b59e-cfe1ae0f99b7" containerName="keystone-db-sync" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.269983 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="57936f2c-e43e-48c1-b59e-cfe1ae0f99b7" containerName="keystone-db-sync" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.270000 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="e48266b0-04c5-4da5-994e-516ef8e36299" containerName="glance-db-sync" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.280673 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55fff446b9-2xk8z" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.313529 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-8gppj"] Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.322455 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-8gppj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.330352 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.330739 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-49g64" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.330966 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.331150 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.331307 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.341551 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55fff446b9-2xk8z"] Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.415976 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-8gppj"] Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.420052 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f404ea3f-ff70-4779-a73a-1e206e845a62-ovsdbserver-nb\") pod \"dnsmasq-dns-55fff446b9-2xk8z\" (UID: \"f404ea3f-ff70-4779-a73a-1e206e845a62\") " pod="openstack/dnsmasq-dns-55fff446b9-2xk8z" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.420136 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f404ea3f-ff70-4779-a73a-1e206e845a62-ovsdbserver-sb\") pod \"dnsmasq-dns-55fff446b9-2xk8z\" (UID: \"f404ea3f-ff70-4779-a73a-1e206e845a62\") " pod="openstack/dnsmasq-dns-55fff446b9-2xk8z" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.420290 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f404ea3f-ff70-4779-a73a-1e206e845a62-dns-swift-storage-0\") pod \"dnsmasq-dns-55fff446b9-2xk8z\" (UID: \"f404ea3f-ff70-4779-a73a-1e206e845a62\") " pod="openstack/dnsmasq-dns-55fff446b9-2xk8z" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.420442 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-255wz\" (UniqueName: \"kubernetes.io/projected/f404ea3f-ff70-4779-a73a-1e206e845a62-kube-api-access-255wz\") pod \"dnsmasq-dns-55fff446b9-2xk8z\" (UID: \"f404ea3f-ff70-4779-a73a-1e206e845a62\") " pod="openstack/dnsmasq-dns-55fff446b9-2xk8z" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.420569 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f404ea3f-ff70-4779-a73a-1e206e845a62-dns-svc\") pod \"dnsmasq-dns-55fff446b9-2xk8z\" (UID: \"f404ea3f-ff70-4779-a73a-1e206e845a62\") " pod="openstack/dnsmasq-dns-55fff446b9-2xk8z" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.420697 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f404ea3f-ff70-4779-a73a-1e206e845a62-config\") pod \"dnsmasq-dns-55fff446b9-2xk8z\" (UID: 
\"f404ea3f-ff70-4779-a73a-1e206e845a62\") " pod="openstack/dnsmasq-dns-55fff446b9-2xk8z" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.525936 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f404ea3f-ff70-4779-a73a-1e206e845a62-config\") pod \"dnsmasq-dns-55fff446b9-2xk8z\" (UID: \"f404ea3f-ff70-4779-a73a-1e206e845a62\") " pod="openstack/dnsmasq-dns-55fff446b9-2xk8z" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.526012 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/aa96a31f-114c-4c5b-a4d1-baa8364b2d2f-credential-keys\") pod \"keystone-bootstrap-8gppj\" (UID: \"aa96a31f-114c-4c5b-a4d1-baa8364b2d2f\") " pod="openstack/keystone-bootstrap-8gppj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.526039 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-klwb6\" (UniqueName: \"kubernetes.io/projected/aa96a31f-114c-4c5b-a4d1-baa8364b2d2f-kube-api-access-klwb6\") pod \"keystone-bootstrap-8gppj\" (UID: \"aa96a31f-114c-4c5b-a4d1-baa8364b2d2f\") " pod="openstack/keystone-bootstrap-8gppj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.526073 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa96a31f-114c-4c5b-a4d1-baa8364b2d2f-combined-ca-bundle\") pod \"keystone-bootstrap-8gppj\" (UID: \"aa96a31f-114c-4c5b-a4d1-baa8364b2d2f\") " pod="openstack/keystone-bootstrap-8gppj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.526101 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f404ea3f-ff70-4779-a73a-1e206e845a62-ovsdbserver-nb\") pod \"dnsmasq-dns-55fff446b9-2xk8z\" (UID: \"f404ea3f-ff70-4779-a73a-1e206e845a62\") " pod="openstack/dnsmasq-dns-55fff446b9-2xk8z" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.526124 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa96a31f-114c-4c5b-a4d1-baa8364b2d2f-config-data\") pod \"keystone-bootstrap-8gppj\" (UID: \"aa96a31f-114c-4c5b-a4d1-baa8364b2d2f\") " pod="openstack/keystone-bootstrap-8gppj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.526139 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f404ea3f-ff70-4779-a73a-1e206e845a62-ovsdbserver-sb\") pod \"dnsmasq-dns-55fff446b9-2xk8z\" (UID: \"f404ea3f-ff70-4779-a73a-1e206e845a62\") " pod="openstack/dnsmasq-dns-55fff446b9-2xk8z" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.526183 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f404ea3f-ff70-4779-a73a-1e206e845a62-dns-swift-storage-0\") pod \"dnsmasq-dns-55fff446b9-2xk8z\" (UID: \"f404ea3f-ff70-4779-a73a-1e206e845a62\") " pod="openstack/dnsmasq-dns-55fff446b9-2xk8z" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.526210 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-255wz\" (UniqueName: \"kubernetes.io/projected/f404ea3f-ff70-4779-a73a-1e206e845a62-kube-api-access-255wz\") pod 
\"dnsmasq-dns-55fff446b9-2xk8z\" (UID: \"f404ea3f-ff70-4779-a73a-1e206e845a62\") " pod="openstack/dnsmasq-dns-55fff446b9-2xk8z" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.526250 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/aa96a31f-114c-4c5b-a4d1-baa8364b2d2f-fernet-keys\") pod \"keystone-bootstrap-8gppj\" (UID: \"aa96a31f-114c-4c5b-a4d1-baa8364b2d2f\") " pod="openstack/keystone-bootstrap-8gppj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.526266 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f404ea3f-ff70-4779-a73a-1e206e845a62-dns-svc\") pod \"dnsmasq-dns-55fff446b9-2xk8z\" (UID: \"f404ea3f-ff70-4779-a73a-1e206e845a62\") " pod="openstack/dnsmasq-dns-55fff446b9-2xk8z" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.526288 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aa96a31f-114c-4c5b-a4d1-baa8364b2d2f-scripts\") pod \"keystone-bootstrap-8gppj\" (UID: \"aa96a31f-114c-4c5b-a4d1-baa8364b2d2f\") " pod="openstack/keystone-bootstrap-8gppj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.529827 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f404ea3f-ff70-4779-a73a-1e206e845a62-ovsdbserver-nb\") pod \"dnsmasq-dns-55fff446b9-2xk8z\" (UID: \"f404ea3f-ff70-4779-a73a-1e206e845a62\") " pod="openstack/dnsmasq-dns-55fff446b9-2xk8z" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.530619 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f404ea3f-ff70-4779-a73a-1e206e845a62-ovsdbserver-sb\") pod \"dnsmasq-dns-55fff446b9-2xk8z\" (UID: \"f404ea3f-ff70-4779-a73a-1e206e845a62\") " pod="openstack/dnsmasq-dns-55fff446b9-2xk8z" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.531137 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f404ea3f-ff70-4779-a73a-1e206e845a62-dns-swift-storage-0\") pod \"dnsmasq-dns-55fff446b9-2xk8z\" (UID: \"f404ea3f-ff70-4779-a73a-1e206e845a62\") " pod="openstack/dnsmasq-dns-55fff446b9-2xk8z" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.531658 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f404ea3f-ff70-4779-a73a-1e206e845a62-dns-svc\") pod \"dnsmasq-dns-55fff446b9-2xk8z\" (UID: \"f404ea3f-ff70-4779-a73a-1e206e845a62\") " pod="openstack/dnsmasq-dns-55fff446b9-2xk8z" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.548936 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f404ea3f-ff70-4779-a73a-1e206e845a62-config\") pod \"dnsmasq-dns-55fff446b9-2xk8z\" (UID: \"f404ea3f-ff70-4779-a73a-1e206e845a62\") " pod="openstack/dnsmasq-dns-55fff446b9-2xk8z" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.575638 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-k7jrj"] Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.577277 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-k7jrj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.592440 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.592680 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.592809 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-wd7fx" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.608452 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-255wz\" (UniqueName: \"kubernetes.io/projected/f404ea3f-ff70-4779-a73a-1e206e845a62-kube-api-access-255wz\") pod \"dnsmasq-dns-55fff446b9-2xk8z\" (UID: \"f404ea3f-ff70-4779-a73a-1e206e845a62\") " pod="openstack/dnsmasq-dns-55fff446b9-2xk8z" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.609050 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-k7jrj"] Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.627880 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-g5ccz"] Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.641483 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-g5ccz" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.639106 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/aa96a31f-114c-4c5b-a4d1-baa8364b2d2f-fernet-keys\") pod \"keystone-bootstrap-8gppj\" (UID: \"aa96a31f-114c-4c5b-a4d1-baa8364b2d2f\") " pod="openstack/keystone-bootstrap-8gppj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.641862 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aa96a31f-114c-4c5b-a4d1-baa8364b2d2f-scripts\") pod \"keystone-bootstrap-8gppj\" (UID: \"aa96a31f-114c-4c5b-a4d1-baa8364b2d2f\") " pod="openstack/keystone-bootstrap-8gppj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.642053 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/aa96a31f-114c-4c5b-a4d1-baa8364b2d2f-credential-keys\") pod \"keystone-bootstrap-8gppj\" (UID: \"aa96a31f-114c-4c5b-a4d1-baa8364b2d2f\") " pod="openstack/keystone-bootstrap-8gppj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.642561 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-klwb6\" (UniqueName: \"kubernetes.io/projected/aa96a31f-114c-4c5b-a4d1-baa8364b2d2f-kube-api-access-klwb6\") pod \"keystone-bootstrap-8gppj\" (UID: \"aa96a31f-114c-4c5b-a4d1-baa8364b2d2f\") " pod="openstack/keystone-bootstrap-8gppj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.642629 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa96a31f-114c-4c5b-a4d1-baa8364b2d2f-combined-ca-bundle\") pod \"keystone-bootstrap-8gppj\" (UID: \"aa96a31f-114c-4c5b-a4d1-baa8364b2d2f\") " pod="openstack/keystone-bootstrap-8gppj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.642707 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/aa96a31f-114c-4c5b-a4d1-baa8364b2d2f-config-data\") pod \"keystone-bootstrap-8gppj\" (UID: \"aa96a31f-114c-4c5b-a4d1-baa8364b2d2f\") " pod="openstack/keystone-bootstrap-8gppj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.639756 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55fff446b9-2xk8z" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.650287 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/aa96a31f-114c-4c5b-a4d1-baa8364b2d2f-fernet-keys\") pod \"keystone-bootstrap-8gppj\" (UID: \"aa96a31f-114c-4c5b-a4d1-baa8364b2d2f\") " pod="openstack/keystone-bootstrap-8gppj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.650587 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/aa96a31f-114c-4c5b-a4d1-baa8364b2d2f-credential-keys\") pod \"keystone-bootstrap-8gppj\" (UID: \"aa96a31f-114c-4c5b-a4d1-baa8364b2d2f\") " pod="openstack/keystone-bootstrap-8gppj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.652901 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aa96a31f-114c-4c5b-a4d1-baa8364b2d2f-scripts\") pod \"keystone-bootstrap-8gppj\" (UID: \"aa96a31f-114c-4c5b-a4d1-baa8364b2d2f\") " pod="openstack/keystone-bootstrap-8gppj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.653764 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa96a31f-114c-4c5b-a4d1-baa8364b2d2f-combined-ca-bundle\") pod \"keystone-bootstrap-8gppj\" (UID: \"aa96a31f-114c-4c5b-a4d1-baa8364b2d2f\") " pod="openstack/keystone-bootstrap-8gppj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.659071 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa96a31f-114c-4c5b-a4d1-baa8364b2d2f-config-data\") pod \"keystone-bootstrap-8gppj\" (UID: \"aa96a31f-114c-4c5b-a4d1-baa8364b2d2f\") " pod="openstack/keystone-bootstrap-8gppj" Feb 18 00:56:22 crc kubenswrapper[4791]: W0218 00:56:22.679601 4791 reflector.go:561] object-"openstack"/"heat-config-data": failed to list *v1.Secret: secrets "heat-config-data" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openstack": no relationship found between node 'crc' and this object Feb 18 00:56:22 crc kubenswrapper[4791]: E0218 00:56:22.679642 4791 reflector.go:158] "Unhandled Error" err="object-\"openstack\"/\"heat-config-data\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"heat-config-data\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openstack\": no relationship found between node 'crc' and this object" logger="UnhandledError" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.679763 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-x765r" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.681866 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-ghjnw"] Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.692737 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-ghjnw" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.711285 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.715657 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-klwb6\" (UniqueName: \"kubernetes.io/projected/aa96a31f-114c-4c5b-a4d1-baa8364b2d2f-kube-api-access-klwb6\") pod \"keystone-bootstrap-8gppj\" (UID: \"aa96a31f-114c-4c5b-a4d1-baa8364b2d2f\") " pod="openstack/keystone-bootstrap-8gppj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.715841 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-486xb" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.716032 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.726792 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-8gppj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.740234 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-g5ccz"] Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.744181 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0-db-sync-config-data\") pod \"cinder-db-sync-k7jrj\" (UID: \"e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0\") " pod="openstack/cinder-db-sync-k7jrj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.744226 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j8kbn\" (UniqueName: \"kubernetes.io/projected/4f39e5ec-0b51-4c0e-9a95-95c3e69163b8-kube-api-access-j8kbn\") pod \"heat-db-sync-g5ccz\" (UID: \"4f39e5ec-0b51-4c0e-9a95-95c3e69163b8\") " pod="openstack/heat-db-sync-g5ccz" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.744246 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0-combined-ca-bundle\") pod \"cinder-db-sync-k7jrj\" (UID: \"e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0\") " pod="openstack/cinder-db-sync-k7jrj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.744275 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0-etc-machine-id\") pod \"cinder-db-sync-k7jrj\" (UID: \"e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0\") " pod="openstack/cinder-db-sync-k7jrj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.744316 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0-scripts\") pod \"cinder-db-sync-k7jrj\" (UID: \"e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0\") " pod="openstack/cinder-db-sync-k7jrj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.744334 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f39e5ec-0b51-4c0e-9a95-95c3e69163b8-config-data\") pod \"heat-db-sync-g5ccz\" (UID: 
\"4f39e5ec-0b51-4c0e-9a95-95c3e69163b8\") " pod="openstack/heat-db-sync-g5ccz" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.744348 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4zbxc\" (UniqueName: \"kubernetes.io/projected/e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0-kube-api-access-4zbxc\") pod \"cinder-db-sync-k7jrj\" (UID: \"e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0\") " pod="openstack/cinder-db-sync-k7jrj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.744384 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0-config-data\") pod \"cinder-db-sync-k7jrj\" (UID: \"e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0\") " pod="openstack/cinder-db-sync-k7jrj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.744405 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f39e5ec-0b51-4c0e-9a95-95c3e69163b8-combined-ca-bundle\") pod \"heat-db-sync-g5ccz\" (UID: \"4f39e5ec-0b51-4c0e-9a95-95c3e69163b8\") " pod="openstack/heat-db-sync-g5ccz" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.785222 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-ghjnw"] Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.845891 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gfhhq\" (UniqueName: \"kubernetes.io/projected/fca332cf-0111-45ef-b20f-726928d11b0b-kube-api-access-gfhhq\") pod \"neutron-db-sync-ghjnw\" (UID: \"fca332cf-0111-45ef-b20f-726928d11b0b\") " pod="openstack/neutron-db-sync-ghjnw" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.845933 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fca332cf-0111-45ef-b20f-726928d11b0b-combined-ca-bundle\") pod \"neutron-db-sync-ghjnw\" (UID: \"fca332cf-0111-45ef-b20f-726928d11b0b\") " pod="openstack/neutron-db-sync-ghjnw" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.845972 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0-db-sync-config-data\") pod \"cinder-db-sync-k7jrj\" (UID: \"e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0\") " pod="openstack/cinder-db-sync-k7jrj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.846000 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j8kbn\" (UniqueName: \"kubernetes.io/projected/4f39e5ec-0b51-4c0e-9a95-95c3e69163b8-kube-api-access-j8kbn\") pod \"heat-db-sync-g5ccz\" (UID: \"4f39e5ec-0b51-4c0e-9a95-95c3e69163b8\") " pod="openstack/heat-db-sync-g5ccz" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.846019 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0-combined-ca-bundle\") pod \"cinder-db-sync-k7jrj\" (UID: \"e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0\") " pod="openstack/cinder-db-sync-k7jrj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.846048 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/secret/fca332cf-0111-45ef-b20f-726928d11b0b-config\") pod \"neutron-db-sync-ghjnw\" (UID: \"fca332cf-0111-45ef-b20f-726928d11b0b\") " pod="openstack/neutron-db-sync-ghjnw" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.846063 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0-etc-machine-id\") pod \"cinder-db-sync-k7jrj\" (UID: \"e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0\") " pod="openstack/cinder-db-sync-k7jrj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.846093 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0-scripts\") pod \"cinder-db-sync-k7jrj\" (UID: \"e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0\") " pod="openstack/cinder-db-sync-k7jrj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.846113 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f39e5ec-0b51-4c0e-9a95-95c3e69163b8-config-data\") pod \"heat-db-sync-g5ccz\" (UID: \"4f39e5ec-0b51-4c0e-9a95-95c3e69163b8\") " pod="openstack/heat-db-sync-g5ccz" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.846128 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4zbxc\" (UniqueName: \"kubernetes.io/projected/e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0-kube-api-access-4zbxc\") pod \"cinder-db-sync-k7jrj\" (UID: \"e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0\") " pod="openstack/cinder-db-sync-k7jrj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.846187 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0-config-data\") pod \"cinder-db-sync-k7jrj\" (UID: \"e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0\") " pod="openstack/cinder-db-sync-k7jrj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.846212 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f39e5ec-0b51-4c0e-9a95-95c3e69163b8-combined-ca-bundle\") pod \"heat-db-sync-g5ccz\" (UID: \"4f39e5ec-0b51-4c0e-9a95-95c3e69163b8\") " pod="openstack/heat-db-sync-g5ccz" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.852223 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0-etc-machine-id\") pod \"cinder-db-sync-k7jrj\" (UID: \"e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0\") " pod="openstack/cinder-db-sync-k7jrj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.855957 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f39e5ec-0b51-4c0e-9a95-95c3e69163b8-combined-ca-bundle\") pod \"heat-db-sync-g5ccz\" (UID: \"4f39e5ec-0b51-4c0e-9a95-95c3e69163b8\") " pod="openstack/heat-db-sync-g5ccz" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.856032 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-ld9m6"] Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.857350 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-ld9m6" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.859692 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0-db-sync-config-data\") pod \"cinder-db-sync-k7jrj\" (UID: \"e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0\") " pod="openstack/cinder-db-sync-k7jrj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.865244 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0-combined-ca-bundle\") pod \"cinder-db-sync-k7jrj\" (UID: \"e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0\") " pod="openstack/cinder-db-sync-k7jrj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.865262 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0-scripts\") pod \"cinder-db-sync-k7jrj\" (UID: \"e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0\") " pod="openstack/cinder-db-sync-k7jrj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.879685 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.879926 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.880061 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-kjnsb" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.887831 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0-config-data\") pod \"cinder-db-sync-k7jrj\" (UID: \"e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0\") " pod="openstack/cinder-db-sync-k7jrj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.916794 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4zbxc\" (UniqueName: \"kubernetes.io/projected/e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0-kube-api-access-4zbxc\") pod \"cinder-db-sync-k7jrj\" (UID: \"e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0\") " pod="openstack/cinder-db-sync-k7jrj" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.948341 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-ld9m6"] Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.949675 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gfhhq\" (UniqueName: \"kubernetes.io/projected/fca332cf-0111-45ef-b20f-726928d11b0b-kube-api-access-gfhhq\") pod \"neutron-db-sync-ghjnw\" (UID: \"fca332cf-0111-45ef-b20f-726928d11b0b\") " pod="openstack/neutron-db-sync-ghjnw" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.950050 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/082acdfd-08b8-4986-8091-22d29ab897f3-combined-ca-bundle\") pod \"placement-db-sync-ld9m6\" (UID: \"082acdfd-08b8-4986-8091-22d29ab897f3\") " pod="openstack/placement-db-sync-ld9m6" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.950134 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/fca332cf-0111-45ef-b20f-726928d11b0b-combined-ca-bundle\") pod \"neutron-db-sync-ghjnw\" (UID: \"fca332cf-0111-45ef-b20f-726928d11b0b\") " pod="openstack/neutron-db-sync-ghjnw" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.950283 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/082acdfd-08b8-4986-8091-22d29ab897f3-scripts\") pod \"placement-db-sync-ld9m6\" (UID: \"082acdfd-08b8-4986-8091-22d29ab897f3\") " pod="openstack/placement-db-sync-ld9m6" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.950357 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gpdlh\" (UniqueName: \"kubernetes.io/projected/082acdfd-08b8-4986-8091-22d29ab897f3-kube-api-access-gpdlh\") pod \"placement-db-sync-ld9m6\" (UID: \"082acdfd-08b8-4986-8091-22d29ab897f3\") " pod="openstack/placement-db-sync-ld9m6" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.950441 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/fca332cf-0111-45ef-b20f-726928d11b0b-config\") pod \"neutron-db-sync-ghjnw\" (UID: \"fca332cf-0111-45ef-b20f-726928d11b0b\") " pod="openstack/neutron-db-sync-ghjnw" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.950531 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/082acdfd-08b8-4986-8091-22d29ab897f3-config-data\") pod \"placement-db-sync-ld9m6\" (UID: \"082acdfd-08b8-4986-8091-22d29ab897f3\") " pod="openstack/placement-db-sync-ld9m6" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.950719 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/082acdfd-08b8-4986-8091-22d29ab897f3-logs\") pod \"placement-db-sync-ld9m6\" (UID: \"082acdfd-08b8-4986-8091-22d29ab897f3\") " pod="openstack/placement-db-sync-ld9m6" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.959382 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/fca332cf-0111-45ef-b20f-726928d11b0b-config\") pod \"neutron-db-sync-ghjnw\" (UID: \"fca332cf-0111-45ef-b20f-726928d11b0b\") " pod="openstack/neutron-db-sync-ghjnw" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.961630 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j8kbn\" (UniqueName: \"kubernetes.io/projected/4f39e5ec-0b51-4c0e-9a95-95c3e69163b8-kube-api-access-j8kbn\") pod \"heat-db-sync-g5ccz\" (UID: \"4f39e5ec-0b51-4c0e-9a95-95c3e69163b8\") " pod="openstack/heat-db-sync-g5ccz" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.961800 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fca332cf-0111-45ef-b20f-726928d11b0b-combined-ca-bundle\") pod \"neutron-db-sync-ghjnw\" (UID: \"fca332cf-0111-45ef-b20f-726928d11b0b\") " pod="openstack/neutron-db-sync-ghjnw" Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.980233 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55fff446b9-2xk8z"] Feb 18 00:56:22 crc kubenswrapper[4791]: I0218 00:56:22.984171 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gfhhq\" (UniqueName: 
\"kubernetes.io/projected/fca332cf-0111-45ef-b20f-726928d11b0b-kube-api-access-gfhhq\") pod \"neutron-db-sync-ghjnw\" (UID: \"fca332cf-0111-45ef-b20f-726928d11b0b\") " pod="openstack/neutron-db-sync-ghjnw" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.055342 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/082acdfd-08b8-4986-8091-22d29ab897f3-scripts\") pod \"placement-db-sync-ld9m6\" (UID: \"082acdfd-08b8-4986-8091-22d29ab897f3\") " pod="openstack/placement-db-sync-ld9m6" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.055393 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gpdlh\" (UniqueName: \"kubernetes.io/projected/082acdfd-08b8-4986-8091-22d29ab897f3-kube-api-access-gpdlh\") pod \"placement-db-sync-ld9m6\" (UID: \"082acdfd-08b8-4986-8091-22d29ab897f3\") " pod="openstack/placement-db-sync-ld9m6" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.055455 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/082acdfd-08b8-4986-8091-22d29ab897f3-config-data\") pod \"placement-db-sync-ld9m6\" (UID: \"082acdfd-08b8-4986-8091-22d29ab897f3\") " pod="openstack/placement-db-sync-ld9m6" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.055580 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/082acdfd-08b8-4986-8091-22d29ab897f3-logs\") pod \"placement-db-sync-ld9m6\" (UID: \"082acdfd-08b8-4986-8091-22d29ab897f3\") " pod="openstack/placement-db-sync-ld9m6" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.055601 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/082acdfd-08b8-4986-8091-22d29ab897f3-combined-ca-bundle\") pod \"placement-db-sync-ld9m6\" (UID: \"082acdfd-08b8-4986-8091-22d29ab897f3\") " pod="openstack/placement-db-sync-ld9m6" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.063372 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/082acdfd-08b8-4986-8091-22d29ab897f3-logs\") pod \"placement-db-sync-ld9m6\" (UID: \"082acdfd-08b8-4986-8091-22d29ab897f3\") " pod="openstack/placement-db-sync-ld9m6" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.149786 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-k7jrj" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.159844 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/082acdfd-08b8-4986-8091-22d29ab897f3-combined-ca-bundle\") pod \"placement-db-sync-ld9m6\" (UID: \"082acdfd-08b8-4986-8091-22d29ab897f3\") " pod="openstack/placement-db-sync-ld9m6" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.161750 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/082acdfd-08b8-4986-8091-22d29ab897f3-scripts\") pod \"placement-db-sync-ld9m6\" (UID: \"082acdfd-08b8-4986-8091-22d29ab897f3\") " pod="openstack/placement-db-sync-ld9m6" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.174445 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/082acdfd-08b8-4986-8091-22d29ab897f3-config-data\") pod \"placement-db-sync-ld9m6\" (UID: \"082acdfd-08b8-4986-8091-22d29ab897f3\") " pod="openstack/placement-db-sync-ld9m6" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.183511 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gpdlh\" (UniqueName: \"kubernetes.io/projected/082acdfd-08b8-4986-8091-22d29ab897f3-kube-api-access-gpdlh\") pod \"placement-db-sync-ld9m6\" (UID: \"082acdfd-08b8-4986-8091-22d29ab897f3\") " pod="openstack/placement-db-sync-ld9m6" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.215676 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-ghjnw" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.264909 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-ld9m6" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.308843 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-76fcf4b695-kwxmj"] Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.310284 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-76fcf4b695-kwxmj"] Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.310304 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-lxpbs"] Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.312738 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76fcf4b695-kwxmj" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.327777 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-lxpbs"] Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.327818 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-76fcf4b695-kwxmj"] Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.327833 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8b5c85b87-mfz79"] Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.329008 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8b5c85b87-mfz79"] Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.329092 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8b5c85b87-mfz79" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.329234 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-lxpbs" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.341449 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.341672 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-fqnht" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.512369 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/675ac16c-a93b-4a48-959b-cecf98248783-dns-swift-storage-0\") pod \"dnsmasq-dns-76fcf4b695-kwxmj\" (UID: \"675ac16c-a93b-4a48-959b-cecf98248783\") " pod="openstack/dnsmasq-dns-76fcf4b695-kwxmj" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.514040 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/675ac16c-a93b-4a48-959b-cecf98248783-dns-svc\") pod \"dnsmasq-dns-76fcf4b695-kwxmj\" (UID: \"675ac16c-a93b-4a48-959b-cecf98248783\") " pod="openstack/dnsmasq-dns-76fcf4b695-kwxmj" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.514174 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/675ac16c-a93b-4a48-959b-cecf98248783-ovsdbserver-nb\") pod \"dnsmasq-dns-76fcf4b695-kwxmj\" (UID: \"675ac16c-a93b-4a48-959b-cecf98248783\") " pod="openstack/dnsmasq-dns-76fcf4b695-kwxmj" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.514284 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e359378c-e000-4599-8f4d-e3169c37a22e-ovsdbserver-nb\") pod \"dnsmasq-dns-8b5c85b87-mfz79\" (UID: \"e359378c-e000-4599-8f4d-e3169c37a22e\") " pod="openstack/dnsmasq-dns-8b5c85b87-mfz79" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.514364 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e359378c-e000-4599-8f4d-e3169c37a22e-ovsdbserver-sb\") pod \"dnsmasq-dns-8b5c85b87-mfz79\" (UID: \"e359378c-e000-4599-8f4d-e3169c37a22e\") " pod="openstack/dnsmasq-dns-8b5c85b87-mfz79" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.514535 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/675ac16c-a93b-4a48-959b-cecf98248783-ovsdbserver-sb\") pod \"dnsmasq-dns-76fcf4b695-kwxmj\" (UID: \"675ac16c-a93b-4a48-959b-cecf98248783\") " pod="openstack/dnsmasq-dns-76fcf4b695-kwxmj" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.514598 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kpv8p\" (UniqueName: \"kubernetes.io/projected/e359378c-e000-4599-8f4d-e3169c37a22e-kube-api-access-kpv8p\") pod \"dnsmasq-dns-8b5c85b87-mfz79\" (UID: \"e359378c-e000-4599-8f4d-e3169c37a22e\") " pod="openstack/dnsmasq-dns-8b5c85b87-mfz79" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.514647 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/675ac16c-a93b-4a48-959b-cecf98248783-config\") pod 
\"dnsmasq-dns-76fcf4b695-kwxmj\" (UID: \"675ac16c-a93b-4a48-959b-cecf98248783\") " pod="openstack/dnsmasq-dns-76fcf4b695-kwxmj" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.514672 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/5ddd4e16-7034-4925-8bba-2320640dd8b7-db-sync-config-data\") pod \"barbican-db-sync-lxpbs\" (UID: \"5ddd4e16-7034-4925-8bba-2320640dd8b7\") " pod="openstack/barbican-db-sync-lxpbs" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.514730 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e359378c-e000-4599-8f4d-e3169c37a22e-dns-swift-storage-0\") pod \"dnsmasq-dns-8b5c85b87-mfz79\" (UID: \"e359378c-e000-4599-8f4d-e3169c37a22e\") " pod="openstack/dnsmasq-dns-8b5c85b87-mfz79" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.514805 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e359378c-e000-4599-8f4d-e3169c37a22e-dns-svc\") pod \"dnsmasq-dns-8b5c85b87-mfz79\" (UID: \"e359378c-e000-4599-8f4d-e3169c37a22e\") " pod="openstack/dnsmasq-dns-8b5c85b87-mfz79" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.514905 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e359378c-e000-4599-8f4d-e3169c37a22e-config\") pod \"dnsmasq-dns-8b5c85b87-mfz79\" (UID: \"e359378c-e000-4599-8f4d-e3169c37a22e\") " pod="openstack/dnsmasq-dns-8b5c85b87-mfz79" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.515071 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mwp5g\" (UniqueName: \"kubernetes.io/projected/5ddd4e16-7034-4925-8bba-2320640dd8b7-kube-api-access-mwp5g\") pod \"barbican-db-sync-lxpbs\" (UID: \"5ddd4e16-7034-4925-8bba-2320640dd8b7\") " pod="openstack/barbican-db-sync-lxpbs" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.515099 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jzbkx\" (UniqueName: \"kubernetes.io/projected/675ac16c-a93b-4a48-959b-cecf98248783-kube-api-access-jzbkx\") pod \"dnsmasq-dns-76fcf4b695-kwxmj\" (UID: \"675ac16c-a93b-4a48-959b-cecf98248783\") " pod="openstack/dnsmasq-dns-76fcf4b695-kwxmj" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.515133 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ddd4e16-7034-4925-8bba-2320640dd8b7-combined-ca-bundle\") pod \"barbican-db-sync-lxpbs\" (UID: \"5ddd4e16-7034-4925-8bba-2320640dd8b7\") " pod="openstack/barbican-db-sync-lxpbs" Feb 18 00:56:23 crc kubenswrapper[4791]: E0218 00:56:23.558080 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[config dns-svc dns-swift-storage-0 kube-api-access-jzbkx ovsdbserver-nb ovsdbserver-sb], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/dnsmasq-dns-76fcf4b695-kwxmj" podUID="675ac16c-a93b-4a48-959b-cecf98248783" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.618449 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/675ac16c-a93b-4a48-959b-cecf98248783-ovsdbserver-sb\") pod \"dnsmasq-dns-76fcf4b695-kwxmj\" (UID: \"675ac16c-a93b-4a48-959b-cecf98248783\") " pod="openstack/dnsmasq-dns-76fcf4b695-kwxmj" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.618797 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kpv8p\" (UniqueName: \"kubernetes.io/projected/e359378c-e000-4599-8f4d-e3169c37a22e-kube-api-access-kpv8p\") pod \"dnsmasq-dns-8b5c85b87-mfz79\" (UID: \"e359378c-e000-4599-8f4d-e3169c37a22e\") " pod="openstack/dnsmasq-dns-8b5c85b87-mfz79" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.618827 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/675ac16c-a93b-4a48-959b-cecf98248783-config\") pod \"dnsmasq-dns-76fcf4b695-kwxmj\" (UID: \"675ac16c-a93b-4a48-959b-cecf98248783\") " pod="openstack/dnsmasq-dns-76fcf4b695-kwxmj" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.618847 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/5ddd4e16-7034-4925-8bba-2320640dd8b7-db-sync-config-data\") pod \"barbican-db-sync-lxpbs\" (UID: \"5ddd4e16-7034-4925-8bba-2320640dd8b7\") " pod="openstack/barbican-db-sync-lxpbs" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.618873 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e359378c-e000-4599-8f4d-e3169c37a22e-dns-swift-storage-0\") pod \"dnsmasq-dns-8b5c85b87-mfz79\" (UID: \"e359378c-e000-4599-8f4d-e3169c37a22e\") " pod="openstack/dnsmasq-dns-8b5c85b87-mfz79" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.618903 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e359378c-e000-4599-8f4d-e3169c37a22e-dns-svc\") pod \"dnsmasq-dns-8b5c85b87-mfz79\" (UID: \"e359378c-e000-4599-8f4d-e3169c37a22e\") " pod="openstack/dnsmasq-dns-8b5c85b87-mfz79" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.618937 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e359378c-e000-4599-8f4d-e3169c37a22e-config\") pod \"dnsmasq-dns-8b5c85b87-mfz79\" (UID: \"e359378c-e000-4599-8f4d-e3169c37a22e\") " pod="openstack/dnsmasq-dns-8b5c85b87-mfz79" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.618986 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mwp5g\" (UniqueName: \"kubernetes.io/projected/5ddd4e16-7034-4925-8bba-2320640dd8b7-kube-api-access-mwp5g\") pod \"barbican-db-sync-lxpbs\" (UID: \"5ddd4e16-7034-4925-8bba-2320640dd8b7\") " pod="openstack/barbican-db-sync-lxpbs" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.619004 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jzbkx\" (UniqueName: \"kubernetes.io/projected/675ac16c-a93b-4a48-959b-cecf98248783-kube-api-access-jzbkx\") pod \"dnsmasq-dns-76fcf4b695-kwxmj\" (UID: \"675ac16c-a93b-4a48-959b-cecf98248783\") " pod="openstack/dnsmasq-dns-76fcf4b695-kwxmj" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.619024 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/5ddd4e16-7034-4925-8bba-2320640dd8b7-combined-ca-bundle\") pod \"barbican-db-sync-lxpbs\" (UID: \"5ddd4e16-7034-4925-8bba-2320640dd8b7\") " pod="openstack/barbican-db-sync-lxpbs" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.619051 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/675ac16c-a93b-4a48-959b-cecf98248783-dns-swift-storage-0\") pod \"dnsmasq-dns-76fcf4b695-kwxmj\" (UID: \"675ac16c-a93b-4a48-959b-cecf98248783\") " pod="openstack/dnsmasq-dns-76fcf4b695-kwxmj" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.619086 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/675ac16c-a93b-4a48-959b-cecf98248783-dns-svc\") pod \"dnsmasq-dns-76fcf4b695-kwxmj\" (UID: \"675ac16c-a93b-4a48-959b-cecf98248783\") " pod="openstack/dnsmasq-dns-76fcf4b695-kwxmj" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.619118 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/675ac16c-a93b-4a48-959b-cecf98248783-ovsdbserver-nb\") pod \"dnsmasq-dns-76fcf4b695-kwxmj\" (UID: \"675ac16c-a93b-4a48-959b-cecf98248783\") " pod="openstack/dnsmasq-dns-76fcf4b695-kwxmj" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.619167 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e359378c-e000-4599-8f4d-e3169c37a22e-ovsdbserver-nb\") pod \"dnsmasq-dns-8b5c85b87-mfz79\" (UID: \"e359378c-e000-4599-8f4d-e3169c37a22e\") " pod="openstack/dnsmasq-dns-8b5c85b87-mfz79" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.619194 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e359378c-e000-4599-8f4d-e3169c37a22e-ovsdbserver-sb\") pod \"dnsmasq-dns-8b5c85b87-mfz79\" (UID: \"e359378c-e000-4599-8f4d-e3169c37a22e\") " pod="openstack/dnsmasq-dns-8b5c85b87-mfz79" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.621761 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e359378c-e000-4599-8f4d-e3169c37a22e-ovsdbserver-sb\") pod \"dnsmasq-dns-8b5c85b87-mfz79\" (UID: \"e359378c-e000-4599-8f4d-e3169c37a22e\") " pod="openstack/dnsmasq-dns-8b5c85b87-mfz79" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.622276 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e359378c-e000-4599-8f4d-e3169c37a22e-config\") pod \"dnsmasq-dns-8b5c85b87-mfz79\" (UID: \"e359378c-e000-4599-8f4d-e3169c37a22e\") " pod="openstack/dnsmasq-dns-8b5c85b87-mfz79" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.622946 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/675ac16c-a93b-4a48-959b-cecf98248783-ovsdbserver-sb\") pod \"dnsmasq-dns-76fcf4b695-kwxmj\" (UID: \"675ac16c-a93b-4a48-959b-cecf98248783\") " pod="openstack/dnsmasq-dns-76fcf4b695-kwxmj" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.623805 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/675ac16c-a93b-4a48-959b-cecf98248783-config\") pod \"dnsmasq-dns-76fcf4b695-kwxmj\" (UID: 
\"675ac16c-a93b-4a48-959b-cecf98248783\") " pod="openstack/dnsmasq-dns-76fcf4b695-kwxmj" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.624801 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e359378c-e000-4599-8f4d-e3169c37a22e-dns-swift-storage-0\") pod \"dnsmasq-dns-8b5c85b87-mfz79\" (UID: \"e359378c-e000-4599-8f4d-e3169c37a22e\") " pod="openstack/dnsmasq-dns-8b5c85b87-mfz79" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.624993 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e359378c-e000-4599-8f4d-e3169c37a22e-dns-svc\") pod \"dnsmasq-dns-8b5c85b87-mfz79\" (UID: \"e359378c-e000-4599-8f4d-e3169c37a22e\") " pod="openstack/dnsmasq-dns-8b5c85b87-mfz79" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.625392 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e359378c-e000-4599-8f4d-e3169c37a22e-ovsdbserver-nb\") pod \"dnsmasq-dns-8b5c85b87-mfz79\" (UID: \"e359378c-e000-4599-8f4d-e3169c37a22e\") " pod="openstack/dnsmasq-dns-8b5c85b87-mfz79" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.625473 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/675ac16c-a93b-4a48-959b-cecf98248783-dns-svc\") pod \"dnsmasq-dns-76fcf4b695-kwxmj\" (UID: \"675ac16c-a93b-4a48-959b-cecf98248783\") " pod="openstack/dnsmasq-dns-76fcf4b695-kwxmj" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.625910 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/675ac16c-a93b-4a48-959b-cecf98248783-dns-swift-storage-0\") pod \"dnsmasq-dns-76fcf4b695-kwxmj\" (UID: \"675ac16c-a93b-4a48-959b-cecf98248783\") " pod="openstack/dnsmasq-dns-76fcf4b695-kwxmj" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.626132 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/675ac16c-a93b-4a48-959b-cecf98248783-ovsdbserver-nb\") pod \"dnsmasq-dns-76fcf4b695-kwxmj\" (UID: \"675ac16c-a93b-4a48-959b-cecf98248783\") " pod="openstack/dnsmasq-dns-76fcf4b695-kwxmj" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.638241 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/5ddd4e16-7034-4925-8bba-2320640dd8b7-db-sync-config-data\") pod \"barbican-db-sync-lxpbs\" (UID: \"5ddd4e16-7034-4925-8bba-2320640dd8b7\") " pod="openstack/barbican-db-sync-lxpbs" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.646712 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mwp5g\" (UniqueName: \"kubernetes.io/projected/5ddd4e16-7034-4925-8bba-2320640dd8b7-kube-api-access-mwp5g\") pod \"barbican-db-sync-lxpbs\" (UID: \"5ddd4e16-7034-4925-8bba-2320640dd8b7\") " pod="openstack/barbican-db-sync-lxpbs" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.648714 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ddd4e16-7034-4925-8bba-2320640dd8b7-combined-ca-bundle\") pod \"barbican-db-sync-lxpbs\" (UID: \"5ddd4e16-7034-4925-8bba-2320640dd8b7\") " pod="openstack/barbican-db-sync-lxpbs" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 
00:56:23.650213 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kpv8p\" (UniqueName: \"kubernetes.io/projected/e359378c-e000-4599-8f4d-e3169c37a22e-kube-api-access-kpv8p\") pod \"dnsmasq-dns-8b5c85b87-mfz79\" (UID: \"e359378c-e000-4599-8f4d-e3169c37a22e\") " pod="openstack/dnsmasq-dns-8b5c85b87-mfz79" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.673514 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jzbkx\" (UniqueName: \"kubernetes.io/projected/675ac16c-a93b-4a48-959b-cecf98248783-kube-api-access-jzbkx\") pod \"dnsmasq-dns-76fcf4b695-kwxmj\" (UID: \"675ac16c-a93b-4a48-959b-cecf98248783\") " pod="openstack/dnsmasq-dns-76fcf4b695-kwxmj" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.773946 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.800306 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f39e5ec-0b51-4c0e-9a95-95c3e69163b8-config-data\") pod \"heat-db-sync-g5ccz\" (UID: \"4f39e5ec-0b51-4c0e-9a95-95c3e69163b8\") " pod="openstack/heat-db-sync-g5ccz" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.875827 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.878283 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.881116 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.881683 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.886632 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8b5c85b87-mfz79" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.909601 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-lxpbs" Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.920800 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:56:23 crc kubenswrapper[4791]: W0218 00:56:23.930295 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf404ea3f_ff70_4779_a73a_1e206e845a62.slice/crio-36fef6955d0b7aa14f08f71e441af06c7623c333d5aee97200d16dd0207a6d0f WatchSource:0}: Error finding container 36fef6955d0b7aa14f08f71e441af06c7623c333d5aee97200d16dd0207a6d0f: Status 404 returned error can't find the container with id 36fef6955d0b7aa14f08f71e441af06c7623c333d5aee97200d16dd0207a6d0f Feb 18 00:56:23 crc kubenswrapper[4791]: W0218 00:56:23.932675 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaa96a31f_114c_4c5b_a4d1_baa8364b2d2f.slice/crio-b9ec040462596f8e41b9cf8d0d933a258e10a9098a446bcfc5acee9ad17b6084 WatchSource:0}: Error finding container b9ec040462596f8e41b9cf8d0d933a258e10a9098a446bcfc5acee9ad17b6084: Status 404 returned error can't find the container with id b9ec040462596f8e41b9cf8d0d933a258e10a9098a446bcfc5acee9ad17b6084 Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.939886 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55fff446b9-2xk8z"] Feb 18 00:56:23 crc kubenswrapper[4791]: I0218 00:56:23.957765 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-8gppj"] Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.032830 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/65d7529c-6130-4f99-9212-d13950d08fd5-log-httpd\") pod \"ceilometer-0\" (UID: \"65d7529c-6130-4f99-9212-d13950d08fd5\") " pod="openstack/ceilometer-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.033652 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65d7529c-6130-4f99-9212-d13950d08fd5-config-data\") pod \"ceilometer-0\" (UID: \"65d7529c-6130-4f99-9212-d13950d08fd5\") " pod="openstack/ceilometer-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.033803 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/65d7529c-6130-4f99-9212-d13950d08fd5-run-httpd\") pod \"ceilometer-0\" (UID: \"65d7529c-6130-4f99-9212-d13950d08fd5\") " pod="openstack/ceilometer-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.033910 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/65d7529c-6130-4f99-9212-d13950d08fd5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"65d7529c-6130-4f99-9212-d13950d08fd5\") " pod="openstack/ceilometer-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.034478 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65d7529c-6130-4f99-9212-d13950d08fd5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"65d7529c-6130-4f99-9212-d13950d08fd5\") " pod="openstack/ceilometer-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.034711 4791 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sznfl\" (UniqueName: \"kubernetes.io/projected/65d7529c-6130-4f99-9212-d13950d08fd5-kube-api-access-sznfl\") pod \"ceilometer-0\" (UID: \"65d7529c-6130-4f99-9212-d13950d08fd5\") " pod="openstack/ceilometer-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.034868 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65d7529c-6130-4f99-9212-d13950d08fd5-scripts\") pod \"ceilometer-0\" (UID: \"65d7529c-6130-4f99-9212-d13950d08fd5\") " pod="openstack/ceilometer-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.101017 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-g5ccz" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.112990 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.118711 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.123781 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-5r5zk" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.126620 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.126641 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.136932 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/65d7529c-6130-4f99-9212-d13950d08fd5-log-httpd\") pod \"ceilometer-0\" (UID: \"65d7529c-6130-4f99-9212-d13950d08fd5\") " pod="openstack/ceilometer-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.136975 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65d7529c-6130-4f99-9212-d13950d08fd5-config-data\") pod \"ceilometer-0\" (UID: \"65d7529c-6130-4f99-9212-d13950d08fd5\") " pod="openstack/ceilometer-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.137023 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/65d7529c-6130-4f99-9212-d13950d08fd5-run-httpd\") pod \"ceilometer-0\" (UID: \"65d7529c-6130-4f99-9212-d13950d08fd5\") " pod="openstack/ceilometer-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.137054 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/65d7529c-6130-4f99-9212-d13950d08fd5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"65d7529c-6130-4f99-9212-d13950d08fd5\") " pod="openstack/ceilometer-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.137089 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65d7529c-6130-4f99-9212-d13950d08fd5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"65d7529c-6130-4f99-9212-d13950d08fd5\") " pod="openstack/ceilometer-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.137135 4791 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sznfl\" (UniqueName: \"kubernetes.io/projected/65d7529c-6130-4f99-9212-d13950d08fd5-kube-api-access-sznfl\") pod \"ceilometer-0\" (UID: \"65d7529c-6130-4f99-9212-d13950d08fd5\") " pod="openstack/ceilometer-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.137187 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65d7529c-6130-4f99-9212-d13950d08fd5-scripts\") pod \"ceilometer-0\" (UID: \"65d7529c-6130-4f99-9212-d13950d08fd5\") " pod="openstack/ceilometer-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.141640 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/65d7529c-6130-4f99-9212-d13950d08fd5-run-httpd\") pod \"ceilometer-0\" (UID: \"65d7529c-6130-4f99-9212-d13950d08fd5\") " pod="openstack/ceilometer-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.141873 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/65d7529c-6130-4f99-9212-d13950d08fd5-log-httpd\") pod \"ceilometer-0\" (UID: \"65d7529c-6130-4f99-9212-d13950d08fd5\") " pod="openstack/ceilometer-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.150255 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65d7529c-6130-4f99-9212-d13950d08fd5-scripts\") pod \"ceilometer-0\" (UID: \"65d7529c-6130-4f99-9212-d13950d08fd5\") " pod="openstack/ceilometer-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.151122 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/65d7529c-6130-4f99-9212-d13950d08fd5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"65d7529c-6130-4f99-9212-d13950d08fd5\") " pod="openstack/ceilometer-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.159046 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.164120 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65d7529c-6130-4f99-9212-d13950d08fd5-config-data\") pod \"ceilometer-0\" (UID: \"65d7529c-6130-4f99-9212-d13950d08fd5\") " pod="openstack/ceilometer-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.169216 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65d7529c-6130-4f99-9212-d13950d08fd5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"65d7529c-6130-4f99-9212-d13950d08fd5\") " pod="openstack/ceilometer-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.172111 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55fff446b9-2xk8z" event={"ID":"f404ea3f-ff70-4779-a73a-1e206e845a62","Type":"ContainerStarted","Data":"36fef6955d0b7aa14f08f71e441af06c7623c333d5aee97200d16dd0207a6d0f"} Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.172611 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sznfl\" (UniqueName: \"kubernetes.io/projected/65d7529c-6130-4f99-9212-d13950d08fd5-kube-api-access-sznfl\") pod \"ceilometer-0\" (UID: \"65d7529c-6130-4f99-9212-d13950d08fd5\") " pod="openstack/ceilometer-0" Feb 18 
00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.173226 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-k7jrj"] Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.187515 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76fcf4b695-kwxmj" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.187523 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8gppj" event={"ID":"aa96a31f-114c-4c5b-a4d1-baa8364b2d2f","Type":"ContainerStarted","Data":"b9ec040462596f8e41b9cf8d0d933a258e10a9098a446bcfc5acee9ad17b6084"} Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.207924 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.234332 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76fcf4b695-kwxmj" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.246337 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a0e200fe-5ca3-4ada-ba0e-29c1f4dae363-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.246391 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a0e200fe-5ca3-4ada-ba0e-29c1f4dae363-scripts\") pod \"glance-default-external-api-0\" (UID: \"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.246420 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0e200fe-5ca3-4ada-ba0e-29c1f4dae363-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.246588 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-4472ce99-9885-40a2-bc85-66819bd1580e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4472ce99-9885-40a2-bc85-66819bd1580e\") pod \"glance-default-external-api-0\" (UID: \"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.246649 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjl4n\" (UniqueName: \"kubernetes.io/projected/a0e200fe-5ca3-4ada-ba0e-29c1f4dae363-kube-api-access-zjl4n\") pod \"glance-default-external-api-0\" (UID: \"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.246683 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a0e200fe-5ca3-4ada-ba0e-29c1f4dae363-logs\") pod \"glance-default-external-api-0\" (UID: \"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.246731 4791 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0e200fe-5ca3-4ada-ba0e-29c1f4dae363-config-data\") pod \"glance-default-external-api-0\" (UID: \"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.263562 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-ld9m6"] Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.277414 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-ghjnw"] Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.348410 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/675ac16c-a93b-4a48-959b-cecf98248783-dns-svc\") pod \"675ac16c-a93b-4a48-959b-cecf98248783\" (UID: \"675ac16c-a93b-4a48-959b-cecf98248783\") " Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.348502 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/675ac16c-a93b-4a48-959b-cecf98248783-ovsdbserver-nb\") pod \"675ac16c-a93b-4a48-959b-cecf98248783\" (UID: \"675ac16c-a93b-4a48-959b-cecf98248783\") " Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.350953 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jzbkx\" (UniqueName: \"kubernetes.io/projected/675ac16c-a93b-4a48-959b-cecf98248783-kube-api-access-jzbkx\") pod \"675ac16c-a93b-4a48-959b-cecf98248783\" (UID: \"675ac16c-a93b-4a48-959b-cecf98248783\") " Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.351034 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/675ac16c-a93b-4a48-959b-cecf98248783-ovsdbserver-sb\") pod \"675ac16c-a93b-4a48-959b-cecf98248783\" (UID: \"675ac16c-a93b-4a48-959b-cecf98248783\") " Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.351120 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/675ac16c-a93b-4a48-959b-cecf98248783-dns-swift-storage-0\") pod \"675ac16c-a93b-4a48-959b-cecf98248783\" (UID: \"675ac16c-a93b-4a48-959b-cecf98248783\") " Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.351182 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/675ac16c-a93b-4a48-959b-cecf98248783-config\") pod \"675ac16c-a93b-4a48-959b-cecf98248783\" (UID: \"675ac16c-a93b-4a48-959b-cecf98248783\") " Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.351394 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/675ac16c-a93b-4a48-959b-cecf98248783-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "675ac16c-a93b-4a48-959b-cecf98248783" (UID: "675ac16c-a93b-4a48-959b-cecf98248783"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.352189 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/675ac16c-a93b-4a48-959b-cecf98248783-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "675ac16c-a93b-4a48-959b-cecf98248783" (UID: "675ac16c-a93b-4a48-959b-cecf98248783"). 
InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.353526 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjl4n\" (UniqueName: \"kubernetes.io/projected/a0e200fe-5ca3-4ada-ba0e-29c1f4dae363-kube-api-access-zjl4n\") pod \"glance-default-external-api-0\" (UID: \"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.353684 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a0e200fe-5ca3-4ada-ba0e-29c1f4dae363-logs\") pod \"glance-default-external-api-0\" (UID: \"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.353831 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0e200fe-5ca3-4ada-ba0e-29c1f4dae363-config-data\") pod \"glance-default-external-api-0\" (UID: \"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.355011 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/675ac16c-a93b-4a48-959b-cecf98248783-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "675ac16c-a93b-4a48-959b-cecf98248783" (UID: "675ac16c-a93b-4a48-959b-cecf98248783"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.355696 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/675ac16c-a93b-4a48-959b-cecf98248783-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "675ac16c-a93b-4a48-959b-cecf98248783" (UID: "675ac16c-a93b-4a48-959b-cecf98248783"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.356092 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/675ac16c-a93b-4a48-959b-cecf98248783-config" (OuterVolumeSpecName: "config") pod "675ac16c-a93b-4a48-959b-cecf98248783" (UID: "675ac16c-a93b-4a48-959b-cecf98248783"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.362029 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a0e200fe-5ca3-4ada-ba0e-29c1f4dae363-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.362101 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a0e200fe-5ca3-4ada-ba0e-29c1f4dae363-scripts\") pod \"glance-default-external-api-0\" (UID: \"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.362175 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a0e200fe-5ca3-4ada-ba0e-29c1f4dae363-logs\") pod \"glance-default-external-api-0\" (UID: \"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.362192 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0e200fe-5ca3-4ada-ba0e-29c1f4dae363-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.362448 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a0e200fe-5ca3-4ada-ba0e-29c1f4dae363-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.362525 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-4472ce99-9885-40a2-bc85-66819bd1580e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4472ce99-9885-40a2-bc85-66819bd1580e\") pod \"glance-default-external-api-0\" (UID: \"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.362708 4791 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/675ac16c-a93b-4a48-959b-cecf98248783-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.362720 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/675ac16c-a93b-4a48-959b-cecf98248783-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.362729 4791 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/675ac16c-a93b-4a48-959b-cecf98248783-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.362737 4791 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/675ac16c-a93b-4a48-959b-cecf98248783-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.362747 4791 reconciler_common.go:293] "Volume detached for volume 
\"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/675ac16c-a93b-4a48-959b-cecf98248783-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.364895 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/675ac16c-a93b-4a48-959b-cecf98248783-kube-api-access-jzbkx" (OuterVolumeSpecName: "kube-api-access-jzbkx") pod "675ac16c-a93b-4a48-959b-cecf98248783" (UID: "675ac16c-a93b-4a48-959b-cecf98248783"). InnerVolumeSpecName "kube-api-access-jzbkx". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.375477 4791 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.375598 4791 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-4472ce99-9885-40a2-bc85-66819bd1580e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4472ce99-9885-40a2-bc85-66819bd1580e\") pod \"glance-default-external-api-0\" (UID: \"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/62820ba3c35aec4dc771d82e657fcb43f04a79384f8e10003d10025b9d199fc2/globalmount\"" pod="openstack/glance-default-external-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.377530 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0e200fe-5ca3-4ada-ba0e-29c1f4dae363-config-data\") pod \"glance-default-external-api-0\" (UID: \"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.379571 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0e200fe-5ca3-4ada-ba0e-29c1f4dae363-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.380849 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a0e200fe-5ca3-4ada-ba0e-29c1f4dae363-scripts\") pod \"glance-default-external-api-0\" (UID: \"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.404527 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjl4n\" (UniqueName: \"kubernetes.io/projected/a0e200fe-5ca3-4ada-ba0e-29c1f4dae363-kube-api-access-zjl4n\") pod \"glance-default-external-api-0\" (UID: \"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.455197 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.456882 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.462537 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.464481 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jzbkx\" (UniqueName: \"kubernetes.io/projected/675ac16c-a93b-4a48-959b-cecf98248783-kube-api-access-jzbkx\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.476618 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.528728 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-4472ce99-9885-40a2-bc85-66819bd1580e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4472ce99-9885-40a2-bc85-66819bd1580e\") pod \"glance-default-external-api-0\" (UID: \"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.569069 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ab98c33-b4f1-4b9a-ab3f-51c92145bc03-scripts\") pod \"glance-default-internal-api-0\" (UID: \"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.569392 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tvjn4\" (UniqueName: \"kubernetes.io/projected/9ab98c33-b4f1-4b9a-ab3f-51c92145bc03-kube-api-access-tvjn4\") pod \"glance-default-internal-api-0\" (UID: \"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.569469 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9ab98c33-b4f1-4b9a-ab3f-51c92145bc03-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.569548 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ab98c33-b4f1-4b9a-ab3f-51c92145bc03-config-data\") pod \"glance-default-internal-api-0\" (UID: \"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.569576 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ab98c33-b4f1-4b9a-ab3f-51c92145bc03-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.569620 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9ab98c33-b4f1-4b9a-ab3f-51c92145bc03-logs\") pod \"glance-default-internal-api-0\" (UID: \"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03\") " pod="openstack/glance-default-internal-api-0" Feb 18 
00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.569650 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\") pod \"glance-default-internal-api-0\" (UID: \"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.670196 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8b5c85b87-mfz79"] Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.672974 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ab98c33-b4f1-4b9a-ab3f-51c92145bc03-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.673055 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9ab98c33-b4f1-4b9a-ab3f-51c92145bc03-logs\") pod \"glance-default-internal-api-0\" (UID: \"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.673148 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\") pod \"glance-default-internal-api-0\" (UID: \"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.673301 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ab98c33-b4f1-4b9a-ab3f-51c92145bc03-scripts\") pod \"glance-default-internal-api-0\" (UID: \"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.674732 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tvjn4\" (UniqueName: \"kubernetes.io/projected/9ab98c33-b4f1-4b9a-ab3f-51c92145bc03-kube-api-access-tvjn4\") pod \"glance-default-internal-api-0\" (UID: \"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.674831 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9ab98c33-b4f1-4b9a-ab3f-51c92145bc03-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.674912 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ab98c33-b4f1-4b9a-ab3f-51c92145bc03-config-data\") pod \"glance-default-internal-api-0\" (UID: \"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.675065 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/9ab98c33-b4f1-4b9a-ab3f-51c92145bc03-logs\") pod \"glance-default-internal-api-0\" (UID: \"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.678866 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9ab98c33-b4f1-4b9a-ab3f-51c92145bc03-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.689174 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ab98c33-b4f1-4b9a-ab3f-51c92145bc03-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.691753 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-lxpbs"] Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.697136 4791 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.697186 4791 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\") pod \"glance-default-internal-api-0\" (UID: \"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/9790f76315acc35c8fd6eab9a9221b467468b56b96e913c660367fc7d70a609d/globalmount\"" pod="openstack/glance-default-internal-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.697387 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ab98c33-b4f1-4b9a-ab3f-51c92145bc03-config-data\") pod \"glance-default-internal-api-0\" (UID: \"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.697762 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ab98c33-b4f1-4b9a-ab3f-51c92145bc03-scripts\") pod \"glance-default-internal-api-0\" (UID: \"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.728934 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tvjn4\" (UniqueName: \"kubernetes.io/projected/9ab98c33-b4f1-4b9a-ab3f-51c92145bc03-kube-api-access-tvjn4\") pod \"glance-default-internal-api-0\" (UID: \"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.807030 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.813255 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\") pod \"glance-default-internal-api-0\" (UID: \"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:24 crc kubenswrapper[4791]: I0218 00:56:24.998717 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-g5ccz"] Feb 18 00:56:25 crc kubenswrapper[4791]: I0218 00:56:25.052643 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:56:25 crc kubenswrapper[4791]: I0218 00:56:25.066294 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 18 00:56:25 crc kubenswrapper[4791]: I0218 00:56:25.321425 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-ghjnw" event={"ID":"fca332cf-0111-45ef-b20f-726928d11b0b","Type":"ContainerStarted","Data":"7f3f56d0fce90539d28bc79714b4b1825db0ffd6b764b7fc59289c2c1e950c2c"} Feb 18 00:56:25 crc kubenswrapper[4791]: I0218 00:56:25.321748 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-ghjnw" event={"ID":"fca332cf-0111-45ef-b20f-726928d11b0b","Type":"ContainerStarted","Data":"359e68f38c88ce7476ee8964b1b8cc94a081c9d8e63cd441e32b0f8f2e06039c"} Feb 18 00:56:25 crc kubenswrapper[4791]: I0218 00:56:25.324408 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 18 00:56:25 crc kubenswrapper[4791]: I0218 00:56:25.398890 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-ld9m6" event={"ID":"082acdfd-08b8-4986-8091-22d29ab897f3","Type":"ContainerStarted","Data":"d0f5bf6d9bab479f6420fc59ac95a297dab72e8047d930a81370f41d0d538aa7"} Feb 18 00:56:25 crc kubenswrapper[4791]: I0218 00:56:25.454531 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-ghjnw" podStartSLOduration=3.454505018 podStartE2EDuration="3.454505018s" podCreationTimestamp="2026-02-18 00:56:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:56:25.356541682 +0000 UTC m=+1326.924554852" watchObservedRunningTime="2026-02-18 00:56:25.454505018 +0000 UTC m=+1327.022518188" Feb 18 00:56:25 crc kubenswrapper[4791]: I0218 00:56:25.454891 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-k7jrj" event={"ID":"e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0","Type":"ContainerStarted","Data":"3a68de1ab1b03269611951f0ab2b3f7a708a644f28f834d646fbd21b7f0c9f60"} Feb 18 00:56:25 crc kubenswrapper[4791]: I0218 00:56:25.485530 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-g5ccz" event={"ID":"4f39e5ec-0b51-4c0e-9a95-95c3e69163b8","Type":"ContainerStarted","Data":"2800c20b824e4ee5ee52adec5302a11c018aab867e02bed9358b9093de7b90f0"} Feb 18 00:56:25 crc kubenswrapper[4791]: I0218 00:56:25.524039 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"65d7529c-6130-4f99-9212-d13950d08fd5","Type":"ContainerStarted","Data":"561a076ee84652b53ce5a564cba79ea0ff1bb5938aa8cdfee1a8994c6cd72885"} Feb 18 
00:56:25 crc kubenswrapper[4791]: I0218 00:56:25.561792 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b5c85b87-mfz79" event={"ID":"e359378c-e000-4599-8f4d-e3169c37a22e","Type":"ContainerStarted","Data":"12b580499651f5c577fca2e4341d8e7d8e7b3c42627bce0c62974a8faf4c6861"} Feb 18 00:56:25 crc kubenswrapper[4791]: I0218 00:56:25.618598 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-lxpbs" event={"ID":"5ddd4e16-7034-4925-8bba-2320640dd8b7","Type":"ContainerStarted","Data":"02621d95b1e2f1ded23ba873dd8279bd620bfbd6eb3aa64a75a83f07b6b94eee"} Feb 18 00:56:25 crc kubenswrapper[4791]: I0218 00:56:25.641468 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 18 00:56:25 crc kubenswrapper[4791]: I0218 00:56:25.668405 4791 generic.go:334] "Generic (PLEG): container finished" podID="f404ea3f-ff70-4779-a73a-1e206e845a62" containerID="b1d3020aa8f43cc1ca62c074d5671f5ea7824b3c1b25bbd4f63c78aa3357842e" exitCode=0 Feb 18 00:56:25 crc kubenswrapper[4791]: I0218 00:56:25.668699 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55fff446b9-2xk8z" event={"ID":"f404ea3f-ff70-4779-a73a-1e206e845a62","Type":"ContainerDied","Data":"b1d3020aa8f43cc1ca62c074d5671f5ea7824b3c1b25bbd4f63c78aa3357842e"} Feb 18 00:56:25 crc kubenswrapper[4791]: I0218 00:56:25.681611 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76fcf4b695-kwxmj" Feb 18 00:56:25 crc kubenswrapper[4791]: I0218 00:56:25.682648 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8gppj" event={"ID":"aa96a31f-114c-4c5b-a4d1-baa8364b2d2f","Type":"ContainerStarted","Data":"c7d22ab55eda80d0c19e034ae89783c0e11b480e209f4e9a870fad65b3943224"} Feb 18 00:56:25 crc kubenswrapper[4791]: I0218 00:56:25.837065 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-8gppj" podStartSLOduration=3.837047573 podStartE2EDuration="3.837047573s" podCreationTimestamp="2026-02-18 00:56:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:56:25.743444552 +0000 UTC m=+1327.311457722" watchObservedRunningTime="2026-02-18 00:56:25.837047573 +0000 UTC m=+1327.405060743" Feb 18 00:56:25 crc kubenswrapper[4791]: I0218 00:56:25.897742 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:56:25 crc kubenswrapper[4791]: I0218 00:56:25.916774 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-76fcf4b695-kwxmj"] Feb 18 00:56:25 crc kubenswrapper[4791]: I0218 00:56:25.938070 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-76fcf4b695-kwxmj"] Feb 18 00:56:25 crc kubenswrapper[4791]: I0218 00:56:25.956058 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 18 00:56:26 crc kubenswrapper[4791]: I0218 00:56:26.190666 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 18 00:56:26 crc kubenswrapper[4791]: W0218 00:56:26.211103 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9ab98c33_b4f1_4b9a_ab3f_51c92145bc03.slice/crio-3e6f0259f2739494dad35f50176bc01eec7832f43361c5665a24a2850f24256d WatchSource:0}: Error 
finding container 3e6f0259f2739494dad35f50176bc01eec7832f43361c5665a24a2850f24256d: Status 404 returned error can't find the container with id 3e6f0259f2739494dad35f50176bc01eec7832f43361c5665a24a2850f24256d Feb 18 00:56:26 crc kubenswrapper[4791]: I0218 00:56:26.457652 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55fff446b9-2xk8z" Feb 18 00:56:26 crc kubenswrapper[4791]: I0218 00:56:26.508142 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f404ea3f-ff70-4779-a73a-1e206e845a62-ovsdbserver-nb\") pod \"f404ea3f-ff70-4779-a73a-1e206e845a62\" (UID: \"f404ea3f-ff70-4779-a73a-1e206e845a62\") " Feb 18 00:56:26 crc kubenswrapper[4791]: I0218 00:56:26.508196 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f404ea3f-ff70-4779-a73a-1e206e845a62-dns-swift-storage-0\") pod \"f404ea3f-ff70-4779-a73a-1e206e845a62\" (UID: \"f404ea3f-ff70-4779-a73a-1e206e845a62\") " Feb 18 00:56:26 crc kubenswrapper[4791]: I0218 00:56:26.508265 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-255wz\" (UniqueName: \"kubernetes.io/projected/f404ea3f-ff70-4779-a73a-1e206e845a62-kube-api-access-255wz\") pod \"f404ea3f-ff70-4779-a73a-1e206e845a62\" (UID: \"f404ea3f-ff70-4779-a73a-1e206e845a62\") " Feb 18 00:56:26 crc kubenswrapper[4791]: I0218 00:56:26.508335 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f404ea3f-ff70-4779-a73a-1e206e845a62-config\") pod \"f404ea3f-ff70-4779-a73a-1e206e845a62\" (UID: \"f404ea3f-ff70-4779-a73a-1e206e845a62\") " Feb 18 00:56:26 crc kubenswrapper[4791]: I0218 00:56:26.508368 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f404ea3f-ff70-4779-a73a-1e206e845a62-dns-svc\") pod \"f404ea3f-ff70-4779-a73a-1e206e845a62\" (UID: \"f404ea3f-ff70-4779-a73a-1e206e845a62\") " Feb 18 00:56:26 crc kubenswrapper[4791]: I0218 00:56:26.508470 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f404ea3f-ff70-4779-a73a-1e206e845a62-ovsdbserver-sb\") pod \"f404ea3f-ff70-4779-a73a-1e206e845a62\" (UID: \"f404ea3f-ff70-4779-a73a-1e206e845a62\") " Feb 18 00:56:26 crc kubenswrapper[4791]: I0218 00:56:26.513791 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f404ea3f-ff70-4779-a73a-1e206e845a62-kube-api-access-255wz" (OuterVolumeSpecName: "kube-api-access-255wz") pod "f404ea3f-ff70-4779-a73a-1e206e845a62" (UID: "f404ea3f-ff70-4779-a73a-1e206e845a62"). InnerVolumeSpecName "kube-api-access-255wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:56:26 crc kubenswrapper[4791]: I0218 00:56:26.549443 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f404ea3f-ff70-4779-a73a-1e206e845a62-config" (OuterVolumeSpecName: "config") pod "f404ea3f-ff70-4779-a73a-1e206e845a62" (UID: "f404ea3f-ff70-4779-a73a-1e206e845a62"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:56:26 crc kubenswrapper[4791]: I0218 00:56:26.558690 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f404ea3f-ff70-4779-a73a-1e206e845a62-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f404ea3f-ff70-4779-a73a-1e206e845a62" (UID: "f404ea3f-ff70-4779-a73a-1e206e845a62"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:56:26 crc kubenswrapper[4791]: I0218 00:56:26.563181 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f404ea3f-ff70-4779-a73a-1e206e845a62-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "f404ea3f-ff70-4779-a73a-1e206e845a62" (UID: "f404ea3f-ff70-4779-a73a-1e206e845a62"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:56:26 crc kubenswrapper[4791]: I0218 00:56:26.577115 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f404ea3f-ff70-4779-a73a-1e206e845a62-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f404ea3f-ff70-4779-a73a-1e206e845a62" (UID: "f404ea3f-ff70-4779-a73a-1e206e845a62"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:56:26 crc kubenswrapper[4791]: I0218 00:56:26.578178 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f404ea3f-ff70-4779-a73a-1e206e845a62-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f404ea3f-ff70-4779-a73a-1e206e845a62" (UID: "f404ea3f-ff70-4779-a73a-1e206e845a62"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:56:26 crc kubenswrapper[4791]: I0218 00:56:26.611421 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f404ea3f-ff70-4779-a73a-1e206e845a62-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:26 crc kubenswrapper[4791]: I0218 00:56:26.611459 4791 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f404ea3f-ff70-4779-a73a-1e206e845a62-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:26 crc kubenswrapper[4791]: I0218 00:56:26.611469 4791 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f404ea3f-ff70-4779-a73a-1e206e845a62-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:26 crc kubenswrapper[4791]: I0218 00:56:26.611480 4791 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f404ea3f-ff70-4779-a73a-1e206e845a62-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:26 crc kubenswrapper[4791]: I0218 00:56:26.611488 4791 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f404ea3f-ff70-4779-a73a-1e206e845a62-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:26 crc kubenswrapper[4791]: I0218 00:56:26.611498 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-255wz\" (UniqueName: \"kubernetes.io/projected/f404ea3f-ff70-4779-a73a-1e206e845a62-kube-api-access-255wz\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:26 crc kubenswrapper[4791]: I0218 00:56:26.754959 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/glance-default-external-api-0" event={"ID":"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363","Type":"ContainerStarted","Data":"979a8938e9f007a9d82f63e560d310c2a98eaf6ad01d7c7132a6358fc4155c91"} Feb 18 00:56:26 crc kubenswrapper[4791]: I0218 00:56:26.760780 4791 generic.go:334] "Generic (PLEG): container finished" podID="e359378c-e000-4599-8f4d-e3169c37a22e" containerID="1338edf2ac576ae9a7f8d612b65496f573822b69899fdddb2d71e5898952f470" exitCode=0 Feb 18 00:56:26 crc kubenswrapper[4791]: I0218 00:56:26.761272 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b5c85b87-mfz79" event={"ID":"e359378c-e000-4599-8f4d-e3169c37a22e","Type":"ContainerStarted","Data":"2014c27c4ab964764f2316f2379b6cd8124bba1b1c25ac92580a34f5c63d1cfd"} Feb 18 00:56:26 crc kubenswrapper[4791]: I0218 00:56:26.761302 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b5c85b87-mfz79" event={"ID":"e359378c-e000-4599-8f4d-e3169c37a22e","Type":"ContainerDied","Data":"1338edf2ac576ae9a7f8d612b65496f573822b69899fdddb2d71e5898952f470"} Feb 18 00:56:26 crc kubenswrapper[4791]: I0218 00:56:26.761348 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8b5c85b87-mfz79" Feb 18 00:56:26 crc kubenswrapper[4791]: I0218 00:56:26.766248 4791 generic.go:334] "Generic (PLEG): container finished" podID="6658e3ff-2b3b-476c-8638-a5b3d94005d4" containerID="10638f1ad38af4c73e52f31fd2e057ff8f2a23ea8fe0b0bb97f9f1ce96579515" exitCode=0 Feb 18 00:56:26 crc kubenswrapper[4791]: I0218 00:56:26.766324 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"6658e3ff-2b3b-476c-8638-a5b3d94005d4","Type":"ContainerDied","Data":"10638f1ad38af4c73e52f31fd2e057ff8f2a23ea8fe0b0bb97f9f1ce96579515"} Feb 18 00:56:26 crc kubenswrapper[4791]: I0218 00:56:26.773157 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55fff446b9-2xk8z" event={"ID":"f404ea3f-ff70-4779-a73a-1e206e845a62","Type":"ContainerDied","Data":"36fef6955d0b7aa14f08f71e441af06c7623c333d5aee97200d16dd0207a6d0f"} Feb 18 00:56:26 crc kubenswrapper[4791]: I0218 00:56:26.773210 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-55fff446b9-2xk8z" Feb 18 00:56:26 crc kubenswrapper[4791]: I0218 00:56:26.773219 4791 scope.go:117] "RemoveContainer" containerID="b1d3020aa8f43cc1ca62c074d5671f5ea7824b3c1b25bbd4f63c78aa3357842e" Feb 18 00:56:26 crc kubenswrapper[4791]: I0218 00:56:26.792488 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8b5c85b87-mfz79" podStartSLOduration=3.792474461 podStartE2EDuration="3.792474461s" podCreationTimestamp="2026-02-18 00:56:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:56:26.789677814 +0000 UTC m=+1328.357690984" watchObservedRunningTime="2026-02-18 00:56:26.792474461 +0000 UTC m=+1328.360487631" Feb 18 00:56:26 crc kubenswrapper[4791]: I0218 00:56:26.795868 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03","Type":"ContainerStarted","Data":"3e6f0259f2739494dad35f50176bc01eec7832f43361c5665a24a2850f24256d"} Feb 18 00:56:26 crc kubenswrapper[4791]: I0218 00:56:26.949032 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55fff446b9-2xk8z"] Feb 18 00:56:26 crc kubenswrapper[4791]: I0218 00:56:26.963346 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-55fff446b9-2xk8z"] Feb 18 00:56:27 crc kubenswrapper[4791]: I0218 00:56:27.110765 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="675ac16c-a93b-4a48-959b-cecf98248783" path="/var/lib/kubelet/pods/675ac16c-a93b-4a48-959b-cecf98248783/volumes" Feb 18 00:56:27 crc kubenswrapper[4791]: I0218 00:56:27.111449 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f404ea3f-ff70-4779-a73a-1e206e845a62" path="/var/lib/kubelet/pods/f404ea3f-ff70-4779-a73a-1e206e845a62/volumes" Feb 18 00:56:27 crc kubenswrapper[4791]: I0218 00:56:27.823235 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03","Type":"ContainerStarted","Data":"4a0a015dbab52db683c4b361d4f9d3858a9a78ad00e2dd479f55e609d6bf2e6b"} Feb 18 00:56:27 crc kubenswrapper[4791]: I0218 00:56:27.839396 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363","Type":"ContainerStarted","Data":"5628ffb573bed69506e0d0772cf926dac0536ca60491aab5833ed84ddc153191"} Feb 18 00:56:27 crc kubenswrapper[4791]: I0218 00:56:27.863754 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"6658e3ff-2b3b-476c-8638-a5b3d94005d4","Type":"ContainerStarted","Data":"f2cf76e68c1ffeb51805e3ab0ad3573058ea1542559e7f96ae185360df301f3b"} Feb 18 00:56:28 crc kubenswrapper[4791]: I0218 00:56:28.883036 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03","Type":"ContainerStarted","Data":"d0534b314685230e288925fff6490e36c2633215dfeb04692eb5a00ca7cb3de0"} Feb 18 00:56:28 crc kubenswrapper[4791]: I0218 00:56:28.883111 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="9ab98c33-b4f1-4b9a-ab3f-51c92145bc03" containerName="glance-log" 
containerID="cri-o://4a0a015dbab52db683c4b361d4f9d3858a9a78ad00e2dd479f55e609d6bf2e6b" gracePeriod=30 Feb 18 00:56:28 crc kubenswrapper[4791]: I0218 00:56:28.883190 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="9ab98c33-b4f1-4b9a-ab3f-51c92145bc03" containerName="glance-httpd" containerID="cri-o://d0534b314685230e288925fff6490e36c2633215dfeb04692eb5a00ca7cb3de0" gracePeriod=30 Feb 18 00:56:28 crc kubenswrapper[4791]: I0218 00:56:28.888145 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363","Type":"ContainerStarted","Data":"4f97989c772a75d4bade621ac6510ec4aec04948a96bed8459c1b6f8721cf86b"} Feb 18 00:56:28 crc kubenswrapper[4791]: I0218 00:56:28.888941 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="a0e200fe-5ca3-4ada-ba0e-29c1f4dae363" containerName="glance-log" containerID="cri-o://5628ffb573bed69506e0d0772cf926dac0536ca60491aab5833ed84ddc153191" gracePeriod=30 Feb 18 00:56:28 crc kubenswrapper[4791]: I0218 00:56:28.889007 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="a0e200fe-5ca3-4ada-ba0e-29c1f4dae363" containerName="glance-httpd" containerID="cri-o://4f97989c772a75d4bade621ac6510ec4aec04948a96bed8459c1b6f8721cf86b" gracePeriod=30 Feb 18 00:56:28 crc kubenswrapper[4791]: I0218 00:56:28.917176 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=5.917141303 podStartE2EDuration="5.917141303s" podCreationTimestamp="2026-02-18 00:56:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:56:28.902049665 +0000 UTC m=+1330.470062835" watchObservedRunningTime="2026-02-18 00:56:28.917141303 +0000 UTC m=+1330.485154483" Feb 18 00:56:28 crc kubenswrapper[4791]: I0218 00:56:28.937260 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.937238606 podStartE2EDuration="5.937238606s" podCreationTimestamp="2026-02-18 00:56:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:56:28.929416784 +0000 UTC m=+1330.497429954" watchObservedRunningTime="2026-02-18 00:56:28.937238606 +0000 UTC m=+1330.505251776" Feb 18 00:56:29 crc kubenswrapper[4791]: I0218 00:56:29.911420 4791 generic.go:334] "Generic (PLEG): container finished" podID="9ab98c33-b4f1-4b9a-ab3f-51c92145bc03" containerID="d0534b314685230e288925fff6490e36c2633215dfeb04692eb5a00ca7cb3de0" exitCode=143 Feb 18 00:56:29 crc kubenswrapper[4791]: I0218 00:56:29.911916 4791 generic.go:334] "Generic (PLEG): container finished" podID="9ab98c33-b4f1-4b9a-ab3f-51c92145bc03" containerID="4a0a015dbab52db683c4b361d4f9d3858a9a78ad00e2dd479f55e609d6bf2e6b" exitCode=143 Feb 18 00:56:29 crc kubenswrapper[4791]: I0218 00:56:29.911570 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03","Type":"ContainerDied","Data":"d0534b314685230e288925fff6490e36c2633215dfeb04692eb5a00ca7cb3de0"} Feb 18 00:56:29 crc kubenswrapper[4791]: I0218 00:56:29.911997 4791 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03","Type":"ContainerDied","Data":"4a0a015dbab52db683c4b361d4f9d3858a9a78ad00e2dd479f55e609d6bf2e6b"} Feb 18 00:56:29 crc kubenswrapper[4791]: I0218 00:56:29.914253 4791 generic.go:334] "Generic (PLEG): container finished" podID="a0e200fe-5ca3-4ada-ba0e-29c1f4dae363" containerID="4f97989c772a75d4bade621ac6510ec4aec04948a96bed8459c1b6f8721cf86b" exitCode=0 Feb 18 00:56:29 crc kubenswrapper[4791]: I0218 00:56:29.914277 4791 generic.go:334] "Generic (PLEG): container finished" podID="a0e200fe-5ca3-4ada-ba0e-29c1f4dae363" containerID="5628ffb573bed69506e0d0772cf926dac0536ca60491aab5833ed84ddc153191" exitCode=143 Feb 18 00:56:29 crc kubenswrapper[4791]: I0218 00:56:29.914322 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363","Type":"ContainerDied","Data":"4f97989c772a75d4bade621ac6510ec4aec04948a96bed8459c1b6f8721cf86b"} Feb 18 00:56:29 crc kubenswrapper[4791]: I0218 00:56:29.914338 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363","Type":"ContainerDied","Data":"5628ffb573bed69506e0d0772cf926dac0536ca60491aab5833ed84ddc153191"} Feb 18 00:56:30 crc kubenswrapper[4791]: I0218 00:56:30.933388 4791 generic.go:334] "Generic (PLEG): container finished" podID="aa96a31f-114c-4c5b-a4d1-baa8364b2d2f" containerID="c7d22ab55eda80d0c19e034ae89783c0e11b480e209f4e9a870fad65b3943224" exitCode=0 Feb 18 00:56:30 crc kubenswrapper[4791]: I0218 00:56:30.933454 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8gppj" event={"ID":"aa96a31f-114c-4c5b-a4d1-baa8364b2d2f","Type":"ContainerDied","Data":"c7d22ab55eda80d0c19e034ae89783c0e11b480e209f4e9a870fad65b3943224"} Feb 18 00:56:30 crc kubenswrapper[4791]: I0218 00:56:30.936606 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"6658e3ff-2b3b-476c-8638-a5b3d94005d4","Type":"ContainerStarted","Data":"b303aa0efabfa63059c2cbeca9387c1ddd283fe69200088bf0e19974463876cb"} Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.109246 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.119370 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.233440 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tvjn4\" (UniqueName: \"kubernetes.io/projected/9ab98c33-b4f1-4b9a-ab3f-51c92145bc03-kube-api-access-tvjn4\") pod \"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03\" (UID: \"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03\") " Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.233511 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9ab98c33-b4f1-4b9a-ab3f-51c92145bc03-logs\") pod \"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03\" (UID: \"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03\") " Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.233577 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0e200fe-5ca3-4ada-ba0e-29c1f4dae363-combined-ca-bundle\") pod \"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363\" (UID: \"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363\") " Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.233629 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0e200fe-5ca3-4ada-ba0e-29c1f4dae363-config-data\") pod \"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363\" (UID: \"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363\") " Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.233705 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9ab98c33-b4f1-4b9a-ab3f-51c92145bc03-httpd-run\") pod \"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03\" (UID: \"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03\") " Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.233759 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ab98c33-b4f1-4b9a-ab3f-51c92145bc03-combined-ca-bundle\") pod \"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03\" (UID: \"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03\") " Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.233779 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a0e200fe-5ca3-4ada-ba0e-29c1f4dae363-logs\") pod \"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363\" (UID: \"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363\") " Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.233824 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ab98c33-b4f1-4b9a-ab3f-51c92145bc03-config-data\") pod \"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03\" (UID: \"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03\") " Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.233974 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\") pod \"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03\" (UID: \"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03\") " Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.234010 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ab98c33-b4f1-4b9a-ab3f-51c92145bc03-scripts\") pod \"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03\" (UID: 
\"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03\") " Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.234034 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a0e200fe-5ca3-4ada-ba0e-29c1f4dae363-httpd-run\") pod \"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363\" (UID: \"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363\") " Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.234099 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4472ce99-9885-40a2-bc85-66819bd1580e\") pod \"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363\" (UID: \"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363\") " Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.234134 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zjl4n\" (UniqueName: \"kubernetes.io/projected/a0e200fe-5ca3-4ada-ba0e-29c1f4dae363-kube-api-access-zjl4n\") pod \"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363\" (UID: \"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363\") " Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.234207 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a0e200fe-5ca3-4ada-ba0e-29c1f4dae363-scripts\") pod \"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363\" (UID: \"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363\") " Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.235376 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a0e200fe-5ca3-4ada-ba0e-29c1f4dae363-logs" (OuterVolumeSpecName: "logs") pod "a0e200fe-5ca3-4ada-ba0e-29c1f4dae363" (UID: "a0e200fe-5ca3-4ada-ba0e-29c1f4dae363"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.235633 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9ab98c33-b4f1-4b9a-ab3f-51c92145bc03-logs" (OuterVolumeSpecName: "logs") pod "9ab98c33-b4f1-4b9a-ab3f-51c92145bc03" (UID: "9ab98c33-b4f1-4b9a-ab3f-51c92145bc03"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.236109 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9ab98c33-b4f1-4b9a-ab3f-51c92145bc03-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "9ab98c33-b4f1-4b9a-ab3f-51c92145bc03" (UID: "9ab98c33-b4f1-4b9a-ab3f-51c92145bc03"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.237996 4791 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9ab98c33-b4f1-4b9a-ab3f-51c92145bc03-logs\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.237987 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a0e200fe-5ca3-4ada-ba0e-29c1f4dae363-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "a0e200fe-5ca3-4ada-ba0e-29c1f4dae363" (UID: "a0e200fe-5ca3-4ada-ba0e-29c1f4dae363"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.238020 4791 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9ab98c33-b4f1-4b9a-ab3f-51c92145bc03-httpd-run\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.238346 4791 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a0e200fe-5ca3-4ada-ba0e-29c1f4dae363-logs\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.241206 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ab98c33-b4f1-4b9a-ab3f-51c92145bc03-kube-api-access-tvjn4" (OuterVolumeSpecName: "kube-api-access-tvjn4") pod "9ab98c33-b4f1-4b9a-ab3f-51c92145bc03" (UID: "9ab98c33-b4f1-4b9a-ab3f-51c92145bc03"). InnerVolumeSpecName "kube-api-access-tvjn4". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.242618 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0e200fe-5ca3-4ada-ba0e-29c1f4dae363-scripts" (OuterVolumeSpecName: "scripts") pod "a0e200fe-5ca3-4ada-ba0e-29c1f4dae363" (UID: "a0e200fe-5ca3-4ada-ba0e-29c1f4dae363"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.243553 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ab98c33-b4f1-4b9a-ab3f-51c92145bc03-scripts" (OuterVolumeSpecName: "scripts") pod "9ab98c33-b4f1-4b9a-ab3f-51c92145bc03" (UID: "9ab98c33-b4f1-4b9a-ab3f-51c92145bc03"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.250601 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0e200fe-5ca3-4ada-ba0e-29c1f4dae363-kube-api-access-zjl4n" (OuterVolumeSpecName: "kube-api-access-zjl4n") pod "a0e200fe-5ca3-4ada-ba0e-29c1f4dae363" (UID: "a0e200fe-5ca3-4ada-ba0e-29c1f4dae363"). InnerVolumeSpecName "kube-api-access-zjl4n". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.276067 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ab98c33-b4f1-4b9a-ab3f-51c92145bc03-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9ab98c33-b4f1-4b9a-ab3f-51c92145bc03" (UID: "9ab98c33-b4f1-4b9a-ab3f-51c92145bc03"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.284343 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4472ce99-9885-40a2-bc85-66819bd1580e" (OuterVolumeSpecName: "glance") pod "a0e200fe-5ca3-4ada-ba0e-29c1f4dae363" (UID: "a0e200fe-5ca3-4ada-ba0e-29c1f4dae363"). InnerVolumeSpecName "pvc-4472ce99-9885-40a2-bc85-66819bd1580e". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.289847 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0e200fe-5ca3-4ada-ba0e-29c1f4dae363-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a0e200fe-5ca3-4ada-ba0e-29c1f4dae363" (UID: "a0e200fe-5ca3-4ada-ba0e-29c1f4dae363"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.309632 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5" (OuterVolumeSpecName: "glance") pod "9ab98c33-b4f1-4b9a-ab3f-51c92145bc03" (UID: "9ab98c33-b4f1-4b9a-ab3f-51c92145bc03"). InnerVolumeSpecName "pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5". PluginName "kubernetes.io/csi", VolumeGidValue "" Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.311144 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0e200fe-5ca3-4ada-ba0e-29c1f4dae363-config-data" (OuterVolumeSpecName: "config-data") pod "a0e200fe-5ca3-4ada-ba0e-29c1f4dae363" (UID: "a0e200fe-5ca3-4ada-ba0e-29c1f4dae363"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.311859 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ab98c33-b4f1-4b9a-ab3f-51c92145bc03-config-data" (OuterVolumeSpecName: "config-data") pod "9ab98c33-b4f1-4b9a-ab3f-51c92145bc03" (UID: "9ab98c33-b4f1-4b9a-ab3f-51c92145bc03"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.340855 4791 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a0e200fe-5ca3-4ada-ba0e-29c1f4dae363-httpd-run\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.340971 4791 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-4472ce99-9885-40a2-bc85-66819bd1580e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4472ce99-9885-40a2-bc85-66819bd1580e\") on node \"crc\" " Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.340987 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zjl4n\" (UniqueName: \"kubernetes.io/projected/a0e200fe-5ca3-4ada-ba0e-29c1f4dae363-kube-api-access-zjl4n\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.340996 4791 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a0e200fe-5ca3-4ada-ba0e-29c1f4dae363-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.341005 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tvjn4\" (UniqueName: \"kubernetes.io/projected/9ab98c33-b4f1-4b9a-ab3f-51c92145bc03-kube-api-access-tvjn4\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.341013 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0e200fe-5ca3-4ada-ba0e-29c1f4dae363-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.341020 4791 
reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0e200fe-5ca3-4ada-ba0e-29c1f4dae363-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.341028 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ab98c33-b4f1-4b9a-ab3f-51c92145bc03-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.341037 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ab98c33-b4f1-4b9a-ab3f-51c92145bc03-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.341053 4791 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\") on node \"crc\" " Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.341062 4791 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ab98c33-b4f1-4b9a-ab3f-51c92145bc03-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.366681 4791 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.366842 4791 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-4472ce99-9885-40a2-bc85-66819bd1580e" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4472ce99-9885-40a2-bc85-66819bd1580e") on node "crc" Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.367392 4791 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.367500 4791 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5") on node "crc" Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.443882 4791 reconciler_common.go:293] "Volume detached for volume \"pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.443925 4791 reconciler_common.go:293] "Volume detached for volume \"pvc-4472ce99-9885-40a2-bc85-66819bd1580e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4472ce99-9885-40a2-bc85-66819bd1580e\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.963468 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.963481 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9ab98c33-b4f1-4b9a-ab3f-51c92145bc03","Type":"ContainerDied","Data":"3e6f0259f2739494dad35f50176bc01eec7832f43361c5665a24a2850f24256d"} Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.964104 4791 scope.go:117] "RemoveContainer" containerID="d0534b314685230e288925fff6490e36c2633215dfeb04692eb5a00ca7cb3de0" Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.966721 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a0e200fe-5ca3-4ada-ba0e-29c1f4dae363","Type":"ContainerDied","Data":"979a8938e9f007a9d82f63e560d310c2a98eaf6ad01d7c7132a6358fc4155c91"} Feb 18 00:56:32 crc kubenswrapper[4791]: I0218 00:56:32.966825 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.057609 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.101343 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.108690 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.131917 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 18 00:56:33 crc kubenswrapper[4791]: E0218 00:56:33.132437 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0e200fe-5ca3-4ada-ba0e-29c1f4dae363" containerName="glance-log" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.132453 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0e200fe-5ca3-4ada-ba0e-29c1f4dae363" containerName="glance-log" Feb 18 00:56:33 crc kubenswrapper[4791]: E0218 00:56:33.132464 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ab98c33-b4f1-4b9a-ab3f-51c92145bc03" containerName="glance-httpd" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.132471 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ab98c33-b4f1-4b9a-ab3f-51c92145bc03" containerName="glance-httpd" Feb 18 00:56:33 crc kubenswrapper[4791]: E0218 00:56:33.132491 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ab98c33-b4f1-4b9a-ab3f-51c92145bc03" containerName="glance-log" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.132497 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ab98c33-b4f1-4b9a-ab3f-51c92145bc03" containerName="glance-log" Feb 18 00:56:33 crc kubenswrapper[4791]: E0218 00:56:33.132513 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0e200fe-5ca3-4ada-ba0e-29c1f4dae363" containerName="glance-httpd" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.132518 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0e200fe-5ca3-4ada-ba0e-29c1f4dae363" containerName="glance-httpd" Feb 18 00:56:33 crc kubenswrapper[4791]: E0218 00:56:33.132543 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f404ea3f-ff70-4779-a73a-1e206e845a62" containerName="init" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.132548 4791 
state_mem.go:107] "Deleted CPUSet assignment" podUID="f404ea3f-ff70-4779-a73a-1e206e845a62" containerName="init" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.132731 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0e200fe-5ca3-4ada-ba0e-29c1f4dae363" containerName="glance-httpd" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.132743 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ab98c33-b4f1-4b9a-ab3f-51c92145bc03" containerName="glance-httpd" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.132754 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="f404ea3f-ff70-4779-a73a-1e206e845a62" containerName="init" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.132779 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ab98c33-b4f1-4b9a-ab3f-51c92145bc03" containerName="glance-log" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.132786 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0e200fe-5ca3-4ada-ba0e-29c1f4dae363" containerName="glance-log" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.133930 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.139954 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.140190 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.143328 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-5r5zk" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.171694 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.185848 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.198354 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.200484 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d59b5e1-a833-4da4-ad0e-6315320cefbc-config-data\") pod \"glance-default-internal-api-0\" (UID: \"9d59b5e1-a833-4da4-ad0e-6315320cefbc\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.200653 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d59b5e1-a833-4da4-ad0e-6315320cefbc-scripts\") pod \"glance-default-internal-api-0\" (UID: \"9d59b5e1-a833-4da4-ad0e-6315320cefbc\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.200863 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d59b5e1-a833-4da4-ad0e-6315320cefbc-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"9d59b5e1-a833-4da4-ad0e-6315320cefbc\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 
00:56:33.201026 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d59b5e1-a833-4da4-ad0e-6315320cefbc-logs\") pod \"glance-default-internal-api-0\" (UID: \"9d59b5e1-a833-4da4-ad0e-6315320cefbc\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.201117 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9d59b5e1-a833-4da4-ad0e-6315320cefbc-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"9d59b5e1-a833-4da4-ad0e-6315320cefbc\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.201221 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.201358 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\") pod \"glance-default-internal-api-0\" (UID: \"9d59b5e1-a833-4da4-ad0e-6315320cefbc\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.201451 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tgrv7\" (UniqueName: \"kubernetes.io/projected/9d59b5e1-a833-4da4-ad0e-6315320cefbc-kube-api-access-tgrv7\") pod \"glance-default-internal-api-0\" (UID: \"9d59b5e1-a833-4da4-ad0e-6315320cefbc\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.209672 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.221208 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.304359 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d59b5e1-a833-4da4-ad0e-6315320cefbc-config-data\") pod \"glance-default-internal-api-0\" (UID: \"9d59b5e1-a833-4da4-ad0e-6315320cefbc\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.304434 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d59b5e1-a833-4da4-ad0e-6315320cefbc-scripts\") pod \"glance-default-internal-api-0\" (UID: \"9d59b5e1-a833-4da4-ad0e-6315320cefbc\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.304467 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6926bfc8-8aed-4893-912b-4969533ad0e5-logs\") pod \"glance-default-external-api-0\" (UID: \"6926bfc8-8aed-4893-912b-4969533ad0e5\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.304543 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/9d59b5e1-a833-4da4-ad0e-6315320cefbc-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"9d59b5e1-a833-4da4-ad0e-6315320cefbc\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.304598 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6926bfc8-8aed-4893-912b-4969533ad0e5-config-data\") pod \"glance-default-external-api-0\" (UID: \"6926bfc8-8aed-4893-912b-4969533ad0e5\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.304619 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d59b5e1-a833-4da4-ad0e-6315320cefbc-logs\") pod \"glance-default-internal-api-0\" (UID: \"9d59b5e1-a833-4da4-ad0e-6315320cefbc\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.304645 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j9fxm\" (UniqueName: \"kubernetes.io/projected/6926bfc8-8aed-4893-912b-4969533ad0e5-kube-api-access-j9fxm\") pod \"glance-default-external-api-0\" (UID: \"6926bfc8-8aed-4893-912b-4969533ad0e5\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.304667 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9d59b5e1-a833-4da4-ad0e-6315320cefbc-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"9d59b5e1-a833-4da4-ad0e-6315320cefbc\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.304695 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-4472ce99-9885-40a2-bc85-66819bd1580e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4472ce99-9885-40a2-bc85-66819bd1580e\") pod \"glance-default-external-api-0\" (UID: \"6926bfc8-8aed-4893-912b-4969533ad0e5\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.304723 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6926bfc8-8aed-4893-912b-4969533ad0e5-scripts\") pod \"glance-default-external-api-0\" (UID: \"6926bfc8-8aed-4893-912b-4969533ad0e5\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.304768 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\") pod \"glance-default-internal-api-0\" (UID: \"9d59b5e1-a833-4da4-ad0e-6315320cefbc\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.304793 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6926bfc8-8aed-4893-912b-4969533ad0e5-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"6926bfc8-8aed-4893-912b-4969533ad0e5\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.304811 4791 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tgrv7\" (UniqueName: \"kubernetes.io/projected/9d59b5e1-a833-4da4-ad0e-6315320cefbc-kube-api-access-tgrv7\") pod \"glance-default-internal-api-0\" (UID: \"9d59b5e1-a833-4da4-ad0e-6315320cefbc\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.304833 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6926bfc8-8aed-4893-912b-4969533ad0e5-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"6926bfc8-8aed-4893-912b-4969533ad0e5\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.305720 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9d59b5e1-a833-4da4-ad0e-6315320cefbc-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"9d59b5e1-a833-4da4-ad0e-6315320cefbc\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.305868 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d59b5e1-a833-4da4-ad0e-6315320cefbc-logs\") pod \"glance-default-internal-api-0\" (UID: \"9d59b5e1-a833-4da4-ad0e-6315320cefbc\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.308893 4791 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.308932 4791 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\") pod \"glance-default-internal-api-0\" (UID: \"9d59b5e1-a833-4da4-ad0e-6315320cefbc\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/9790f76315acc35c8fd6eab9a9221b467468b56b96e913c660367fc7d70a609d/globalmount\"" pod="openstack/glance-default-internal-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.312966 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d59b5e1-a833-4da4-ad0e-6315320cefbc-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"9d59b5e1-a833-4da4-ad0e-6315320cefbc\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.320113 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d59b5e1-a833-4da4-ad0e-6315320cefbc-config-data\") pod \"glance-default-internal-api-0\" (UID: \"9d59b5e1-a833-4da4-ad0e-6315320cefbc\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.320333 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d59b5e1-a833-4da4-ad0e-6315320cefbc-scripts\") pod \"glance-default-internal-api-0\" (UID: \"9d59b5e1-a833-4da4-ad0e-6315320cefbc\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.326925 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-tgrv7\" (UniqueName: \"kubernetes.io/projected/9d59b5e1-a833-4da4-ad0e-6315320cefbc-kube-api-access-tgrv7\") pod \"glance-default-internal-api-0\" (UID: \"9d59b5e1-a833-4da4-ad0e-6315320cefbc\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.388973 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\") pod \"glance-default-internal-api-0\" (UID: \"9d59b5e1-a833-4da4-ad0e-6315320cefbc\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.406377 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6926bfc8-8aed-4893-912b-4969533ad0e5-logs\") pod \"glance-default-external-api-0\" (UID: \"6926bfc8-8aed-4893-912b-4969533ad0e5\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.406539 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6926bfc8-8aed-4893-912b-4969533ad0e5-config-data\") pod \"glance-default-external-api-0\" (UID: \"6926bfc8-8aed-4893-912b-4969533ad0e5\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.406574 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j9fxm\" (UniqueName: \"kubernetes.io/projected/6926bfc8-8aed-4893-912b-4969533ad0e5-kube-api-access-j9fxm\") pod \"glance-default-external-api-0\" (UID: \"6926bfc8-8aed-4893-912b-4969533ad0e5\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.406614 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-4472ce99-9885-40a2-bc85-66819bd1580e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4472ce99-9885-40a2-bc85-66819bd1580e\") pod \"glance-default-external-api-0\" (UID: \"6926bfc8-8aed-4893-912b-4969533ad0e5\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.406653 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6926bfc8-8aed-4893-912b-4969533ad0e5-scripts\") pod \"glance-default-external-api-0\" (UID: \"6926bfc8-8aed-4893-912b-4969533ad0e5\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.406718 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6926bfc8-8aed-4893-912b-4969533ad0e5-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"6926bfc8-8aed-4893-912b-4969533ad0e5\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.406748 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6926bfc8-8aed-4893-912b-4969533ad0e5-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"6926bfc8-8aed-4893-912b-4969533ad0e5\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.407079 4791 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6926bfc8-8aed-4893-912b-4969533ad0e5-logs\") pod \"glance-default-external-api-0\" (UID: \"6926bfc8-8aed-4893-912b-4969533ad0e5\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.408078 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6926bfc8-8aed-4893-912b-4969533ad0e5-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"6926bfc8-8aed-4893-912b-4969533ad0e5\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.411378 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6926bfc8-8aed-4893-912b-4969533ad0e5-scripts\") pod \"glance-default-external-api-0\" (UID: \"6926bfc8-8aed-4893-912b-4969533ad0e5\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.412540 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6926bfc8-8aed-4893-912b-4969533ad0e5-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"6926bfc8-8aed-4893-912b-4969533ad0e5\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.412709 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6926bfc8-8aed-4893-912b-4969533ad0e5-config-data\") pod \"glance-default-external-api-0\" (UID: \"6926bfc8-8aed-4893-912b-4969533ad0e5\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.431597 4791 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.431649 4791 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-4472ce99-9885-40a2-bc85-66819bd1580e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4472ce99-9885-40a2-bc85-66819bd1580e\") pod \"glance-default-external-api-0\" (UID: \"6926bfc8-8aed-4893-912b-4969533ad0e5\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/62820ba3c35aec4dc771d82e657fcb43f04a79384f8e10003d10025b9d199fc2/globalmount\"" pod="openstack/glance-default-external-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.442355 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j9fxm\" (UniqueName: \"kubernetes.io/projected/6926bfc8-8aed-4893-912b-4969533ad0e5-kube-api-access-j9fxm\") pod \"glance-default-external-api-0\" (UID: \"6926bfc8-8aed-4893-912b-4969533ad0e5\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.463864 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.565669 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 18 00:56:33 crc kubenswrapper[4791]: E0218 00:56:33.566820 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[glance], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/glance-default-external-api-0" podUID="6926bfc8-8aed-4893-912b-4969533ad0e5" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.686059 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-4472ce99-9885-40a2-bc85-66819bd1580e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4472ce99-9885-40a2-bc85-66819bd1580e\") pod \"glance-default-external-api-0\" (UID: \"6926bfc8-8aed-4893-912b-4969533ad0e5\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.713140 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.888363 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-8b5c85b87-mfz79" Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.946709 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-96ptc"] Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.946974 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-77585f5f8c-96ptc" podUID="ea161630-d65f-4b00-bcf3-bc3822b3011e" containerName="dnsmasq-dns" containerID="cri-o://0bb2b7ee20a8bb64f7435a146b058241798837a866812c67cea49c071f19184c" gracePeriod=10 Feb 18 00:56:33 crc kubenswrapper[4791]: I0218 00:56:33.976457 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 18 00:56:34 crc kubenswrapper[4791]: I0218 00:56:34.038980 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 18 00:56:34 crc kubenswrapper[4791]: I0218 00:56:34.120666 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4472ce99-9885-40a2-bc85-66819bd1580e\") pod \"6926bfc8-8aed-4893-912b-4969533ad0e5\" (UID: \"6926bfc8-8aed-4893-912b-4969533ad0e5\") " Feb 18 00:56:34 crc kubenswrapper[4791]: I0218 00:56:34.120815 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6926bfc8-8aed-4893-912b-4969533ad0e5-httpd-run\") pod \"6926bfc8-8aed-4893-912b-4969533ad0e5\" (UID: \"6926bfc8-8aed-4893-912b-4969533ad0e5\") " Feb 18 00:56:34 crc kubenswrapper[4791]: I0218 00:56:34.121022 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j9fxm\" (UniqueName: \"kubernetes.io/projected/6926bfc8-8aed-4893-912b-4969533ad0e5-kube-api-access-j9fxm\") pod \"6926bfc8-8aed-4893-912b-4969533ad0e5\" (UID: \"6926bfc8-8aed-4893-912b-4969533ad0e5\") " Feb 18 00:56:34 crc kubenswrapper[4791]: I0218 00:56:34.121327 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6926bfc8-8aed-4893-912b-4969533ad0e5-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "6926bfc8-8aed-4893-912b-4969533ad0e5" (UID: "6926bfc8-8aed-4893-912b-4969533ad0e5"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:56:34 crc kubenswrapper[4791]: I0218 00:56:34.122200 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6926bfc8-8aed-4893-912b-4969533ad0e5-config-data\") pod \"6926bfc8-8aed-4893-912b-4969533ad0e5\" (UID: \"6926bfc8-8aed-4893-912b-4969533ad0e5\") " Feb 18 00:56:34 crc kubenswrapper[4791]: I0218 00:56:34.122314 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6926bfc8-8aed-4893-912b-4969533ad0e5-combined-ca-bundle\") pod \"6926bfc8-8aed-4893-912b-4969533ad0e5\" (UID: \"6926bfc8-8aed-4893-912b-4969533ad0e5\") " Feb 18 00:56:34 crc kubenswrapper[4791]: I0218 00:56:34.122395 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6926bfc8-8aed-4893-912b-4969533ad0e5-scripts\") pod \"6926bfc8-8aed-4893-912b-4969533ad0e5\" (UID: \"6926bfc8-8aed-4893-912b-4969533ad0e5\") " Feb 18 00:56:34 crc kubenswrapper[4791]: I0218 00:56:34.122423 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6926bfc8-8aed-4893-912b-4969533ad0e5-logs\") pod \"6926bfc8-8aed-4893-912b-4969533ad0e5\" (UID: \"6926bfc8-8aed-4893-912b-4969533ad0e5\") " Feb 18 00:56:34 crc kubenswrapper[4791]: I0218 00:56:34.123412 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6926bfc8-8aed-4893-912b-4969533ad0e5-logs" (OuterVolumeSpecName: "logs") pod "6926bfc8-8aed-4893-912b-4969533ad0e5" (UID: "6926bfc8-8aed-4893-912b-4969533ad0e5"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:56:34 crc kubenswrapper[4791]: I0218 00:56:34.123840 4791 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6926bfc8-8aed-4893-912b-4969533ad0e5-httpd-run\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:34 crc kubenswrapper[4791]: I0218 00:56:34.123929 4791 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6926bfc8-8aed-4893-912b-4969533ad0e5-logs\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:34 crc kubenswrapper[4791]: I0218 00:56:34.127371 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6926bfc8-8aed-4893-912b-4969533ad0e5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6926bfc8-8aed-4893-912b-4969533ad0e5" (UID: "6926bfc8-8aed-4893-912b-4969533ad0e5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:56:34 crc kubenswrapper[4791]: I0218 00:56:34.127592 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6926bfc8-8aed-4893-912b-4969533ad0e5-kube-api-access-j9fxm" (OuterVolumeSpecName: "kube-api-access-j9fxm") pod "6926bfc8-8aed-4893-912b-4969533ad0e5" (UID: "6926bfc8-8aed-4893-912b-4969533ad0e5"). InnerVolumeSpecName "kube-api-access-j9fxm". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:56:34 crc kubenswrapper[4791]: I0218 00:56:34.127761 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6926bfc8-8aed-4893-912b-4969533ad0e5-scripts" (OuterVolumeSpecName: "scripts") pod "6926bfc8-8aed-4893-912b-4969533ad0e5" (UID: "6926bfc8-8aed-4893-912b-4969533ad0e5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:56:34 crc kubenswrapper[4791]: I0218 00:56:34.128529 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6926bfc8-8aed-4893-912b-4969533ad0e5-config-data" (OuterVolumeSpecName: "config-data") pod "6926bfc8-8aed-4893-912b-4969533ad0e5" (UID: "6926bfc8-8aed-4893-912b-4969533ad0e5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:56:34 crc kubenswrapper[4791]: I0218 00:56:34.145617 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4472ce99-9885-40a2-bc85-66819bd1580e" (OuterVolumeSpecName: "glance") pod "6926bfc8-8aed-4893-912b-4969533ad0e5" (UID: "6926bfc8-8aed-4893-912b-4969533ad0e5"). InnerVolumeSpecName "pvc-4472ce99-9885-40a2-bc85-66819bd1580e". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Feb 18 00:56:34 crc kubenswrapper[4791]: I0218 00:56:34.226193 4791 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-4472ce99-9885-40a2-bc85-66819bd1580e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4472ce99-9885-40a2-bc85-66819bd1580e\") on node \"crc\" " Feb 18 00:56:34 crc kubenswrapper[4791]: I0218 00:56:34.226510 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j9fxm\" (UniqueName: \"kubernetes.io/projected/6926bfc8-8aed-4893-912b-4969533ad0e5-kube-api-access-j9fxm\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:34 crc kubenswrapper[4791]: I0218 00:56:34.226521 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6926bfc8-8aed-4893-912b-4969533ad0e5-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:34 crc kubenswrapper[4791]: I0218 00:56:34.226531 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6926bfc8-8aed-4893-912b-4969533ad0e5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:34 crc kubenswrapper[4791]: I0218 00:56:34.226539 4791 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6926bfc8-8aed-4893-912b-4969533ad0e5-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:34 crc kubenswrapper[4791]: I0218 00:56:34.251668 4791 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Feb 18 00:56:34 crc kubenswrapper[4791]: I0218 00:56:34.251838 4791 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-4472ce99-9885-40a2-bc85-66819bd1580e" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4472ce99-9885-40a2-bc85-66819bd1580e") on node "crc" Feb 18 00:56:34 crc kubenswrapper[4791]: I0218 00:56:34.331897 4791 reconciler_common.go:293] "Volume detached for volume \"pvc-4472ce99-9885-40a2-bc85-66819bd1580e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4472ce99-9885-40a2-bc85-66819bd1580e\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.005285 4791 generic.go:334] "Generic (PLEG): container finished" podID="ea161630-d65f-4b00-bcf3-bc3822b3011e" containerID="0bb2b7ee20a8bb64f7435a146b058241798837a866812c67cea49c071f19184c" exitCode=0 Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.005359 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-96ptc" event={"ID":"ea161630-d65f-4b00-bcf3-bc3822b3011e","Type":"ContainerDied","Data":"0bb2b7ee20a8bb64f7435a146b058241798837a866812c67cea49c071f19184c"} Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.005460 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.146862 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9ab98c33-b4f1-4b9a-ab3f-51c92145bc03" path="/var/lib/kubelet/pods/9ab98c33-b4f1-4b9a-ab3f-51c92145bc03/volumes" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.150262 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0e200fe-5ca3-4ada-ba0e-29c1f4dae363" path="/var/lib/kubelet/pods/a0e200fe-5ca3-4ada-ba0e-29c1f4dae363/volumes" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.151445 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.157278 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.175463 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.195958 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.196061 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.202661 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.202882 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Feb 18 00:56:35 crc kubenswrapper[4791]: E0218 00:56:35.262343 4791 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6926bfc8_8aed_4893_912b_4969533ad0e5.slice\": RecentStats: unable to find data in memory cache]" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.358641 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-4472ce99-9885-40a2-bc85-66819bd1580e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4472ce99-9885-40a2-bc85-66819bd1580e\") pod \"glance-default-external-api-0\" (UID: \"718be5cf-bdd1-4639-9fc5-343a770f1244\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.358705 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/718be5cf-bdd1-4639-9fc5-343a770f1244-logs\") pod \"glance-default-external-api-0\" (UID: \"718be5cf-bdd1-4639-9fc5-343a770f1244\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.358755 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cvh9c\" (UniqueName: \"kubernetes.io/projected/718be5cf-bdd1-4639-9fc5-343a770f1244-kube-api-access-cvh9c\") pod \"glance-default-external-api-0\" (UID: \"718be5cf-bdd1-4639-9fc5-343a770f1244\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.358796 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/718be5cf-bdd1-4639-9fc5-343a770f1244-config-data\") pod \"glance-default-external-api-0\" (UID: \"718be5cf-bdd1-4639-9fc5-343a770f1244\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.358832 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718be5cf-bdd1-4639-9fc5-343a770f1244-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"718be5cf-bdd1-4639-9fc5-343a770f1244\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.358867 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/718be5cf-bdd1-4639-9fc5-343a770f1244-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"718be5cf-bdd1-4639-9fc5-343a770f1244\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.358901 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/718be5cf-bdd1-4639-9fc5-343a770f1244-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"718be5cf-bdd1-4639-9fc5-343a770f1244\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.358953 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/718be5cf-bdd1-4639-9fc5-343a770f1244-scripts\") pod \"glance-default-external-api-0\" (UID: \"718be5cf-bdd1-4639-9fc5-343a770f1244\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.461143 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/718be5cf-bdd1-4639-9fc5-343a770f1244-scripts\") pod \"glance-default-external-api-0\" (UID: \"718be5cf-bdd1-4639-9fc5-343a770f1244\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.461439 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-4472ce99-9885-40a2-bc85-66819bd1580e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4472ce99-9885-40a2-bc85-66819bd1580e\") pod \"glance-default-external-api-0\" (UID: \"718be5cf-bdd1-4639-9fc5-343a770f1244\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.461481 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/718be5cf-bdd1-4639-9fc5-343a770f1244-logs\") pod \"glance-default-external-api-0\" (UID: \"718be5cf-bdd1-4639-9fc5-343a770f1244\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.461522 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cvh9c\" (UniqueName: \"kubernetes.io/projected/718be5cf-bdd1-4639-9fc5-343a770f1244-kube-api-access-cvh9c\") pod \"glance-default-external-api-0\" (UID: \"718be5cf-bdd1-4639-9fc5-343a770f1244\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.461559 4791 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/718be5cf-bdd1-4639-9fc5-343a770f1244-config-data\") pod \"glance-default-external-api-0\" (UID: \"718be5cf-bdd1-4639-9fc5-343a770f1244\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.461596 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718be5cf-bdd1-4639-9fc5-343a770f1244-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"718be5cf-bdd1-4639-9fc5-343a770f1244\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.461645 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/718be5cf-bdd1-4639-9fc5-343a770f1244-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"718be5cf-bdd1-4639-9fc5-343a770f1244\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.461684 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/718be5cf-bdd1-4639-9fc5-343a770f1244-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"718be5cf-bdd1-4639-9fc5-343a770f1244\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.463004 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/718be5cf-bdd1-4639-9fc5-343a770f1244-logs\") pod \"glance-default-external-api-0\" (UID: \"718be5cf-bdd1-4639-9fc5-343a770f1244\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.463042 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/718be5cf-bdd1-4639-9fc5-343a770f1244-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"718be5cf-bdd1-4639-9fc5-343a770f1244\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.466488 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/718be5cf-bdd1-4639-9fc5-343a770f1244-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"718be5cf-bdd1-4639-9fc5-343a770f1244\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.466600 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/718be5cf-bdd1-4639-9fc5-343a770f1244-scripts\") pod \"glance-default-external-api-0\" (UID: \"718be5cf-bdd1-4639-9fc5-343a770f1244\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.467460 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718be5cf-bdd1-4639-9fc5-343a770f1244-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"718be5cf-bdd1-4639-9fc5-343a770f1244\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.468369 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/718be5cf-bdd1-4639-9fc5-343a770f1244-config-data\") pod 
\"glance-default-external-api-0\" (UID: \"718be5cf-bdd1-4639-9fc5-343a770f1244\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.468565 4791 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.468644 4791 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-4472ce99-9885-40a2-bc85-66819bd1580e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4472ce99-9885-40a2-bc85-66819bd1580e\") pod \"glance-default-external-api-0\" (UID: \"718be5cf-bdd1-4639-9fc5-343a770f1244\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/62820ba3c35aec4dc771d82e657fcb43f04a79384f8e10003d10025b9d199fc2/globalmount\"" pod="openstack/glance-default-external-api-0" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.480827 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cvh9c\" (UniqueName: \"kubernetes.io/projected/718be5cf-bdd1-4639-9fc5-343a770f1244-kube-api-access-cvh9c\") pod \"glance-default-external-api-0\" (UID: \"718be5cf-bdd1-4639-9fc5-343a770f1244\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.517963 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-4472ce99-9885-40a2-bc85-66819bd1580e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4472ce99-9885-40a2-bc85-66819bd1580e\") pod \"glance-default-external-api-0\" (UID: \"718be5cf-bdd1-4639-9fc5-343a770f1244\") " pod="openstack/glance-default-external-api-0" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.523318 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.758496 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-8gppj" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.870800 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/aa96a31f-114c-4c5b-a4d1-baa8364b2d2f-credential-keys\") pod \"aa96a31f-114c-4c5b-a4d1-baa8364b2d2f\" (UID: \"aa96a31f-114c-4c5b-a4d1-baa8364b2d2f\") " Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.871019 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/aa96a31f-114c-4c5b-a4d1-baa8364b2d2f-fernet-keys\") pod \"aa96a31f-114c-4c5b-a4d1-baa8364b2d2f\" (UID: \"aa96a31f-114c-4c5b-a4d1-baa8364b2d2f\") " Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.871068 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa96a31f-114c-4c5b-a4d1-baa8364b2d2f-config-data\") pod \"aa96a31f-114c-4c5b-a4d1-baa8364b2d2f\" (UID: \"aa96a31f-114c-4c5b-a4d1-baa8364b2d2f\") " Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.871214 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa96a31f-114c-4c5b-a4d1-baa8364b2d2f-combined-ca-bundle\") pod \"aa96a31f-114c-4c5b-a4d1-baa8364b2d2f\" (UID: \"aa96a31f-114c-4c5b-a4d1-baa8364b2d2f\") " Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.871254 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aa96a31f-114c-4c5b-a4d1-baa8364b2d2f-scripts\") pod \"aa96a31f-114c-4c5b-a4d1-baa8364b2d2f\" (UID: \"aa96a31f-114c-4c5b-a4d1-baa8364b2d2f\") " Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.871296 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-klwb6\" (UniqueName: \"kubernetes.io/projected/aa96a31f-114c-4c5b-a4d1-baa8364b2d2f-kube-api-access-klwb6\") pod \"aa96a31f-114c-4c5b-a4d1-baa8364b2d2f\" (UID: \"aa96a31f-114c-4c5b-a4d1-baa8364b2d2f\") " Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.875594 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa96a31f-114c-4c5b-a4d1-baa8364b2d2f-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "aa96a31f-114c-4c5b-a4d1-baa8364b2d2f" (UID: "aa96a31f-114c-4c5b-a4d1-baa8364b2d2f"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.876139 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa96a31f-114c-4c5b-a4d1-baa8364b2d2f-kube-api-access-klwb6" (OuterVolumeSpecName: "kube-api-access-klwb6") pod "aa96a31f-114c-4c5b-a4d1-baa8364b2d2f" (UID: "aa96a31f-114c-4c5b-a4d1-baa8364b2d2f"). InnerVolumeSpecName "kube-api-access-klwb6". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.876217 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa96a31f-114c-4c5b-a4d1-baa8364b2d2f-scripts" (OuterVolumeSpecName: "scripts") pod "aa96a31f-114c-4c5b-a4d1-baa8364b2d2f" (UID: "aa96a31f-114c-4c5b-a4d1-baa8364b2d2f"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.877027 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa96a31f-114c-4c5b-a4d1-baa8364b2d2f-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "aa96a31f-114c-4c5b-a4d1-baa8364b2d2f" (UID: "aa96a31f-114c-4c5b-a4d1-baa8364b2d2f"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.901758 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa96a31f-114c-4c5b-a4d1-baa8364b2d2f-config-data" (OuterVolumeSpecName: "config-data") pod "aa96a31f-114c-4c5b-a4d1-baa8364b2d2f" (UID: "aa96a31f-114c-4c5b-a4d1-baa8364b2d2f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.906870 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa96a31f-114c-4c5b-a4d1-baa8364b2d2f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "aa96a31f-114c-4c5b-a4d1-baa8364b2d2f" (UID: "aa96a31f-114c-4c5b-a4d1-baa8364b2d2f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.973984 4791 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/aa96a31f-114c-4c5b-a4d1-baa8364b2d2f-credential-keys\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.974019 4791 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/aa96a31f-114c-4c5b-a4d1-baa8364b2d2f-fernet-keys\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.974028 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa96a31f-114c-4c5b-a4d1-baa8364b2d2f-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.974036 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa96a31f-114c-4c5b-a4d1-baa8364b2d2f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.974045 4791 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aa96a31f-114c-4c5b-a4d1-baa8364b2d2f-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:35 crc kubenswrapper[4791]: I0218 00:56:35.974054 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-klwb6\" (UniqueName: \"kubernetes.io/projected/aa96a31f-114c-4c5b-a4d1-baa8364b2d2f-kube-api-access-klwb6\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:36 crc kubenswrapper[4791]: I0218 00:56:36.017179 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8gppj" event={"ID":"aa96a31f-114c-4c5b-a4d1-baa8364b2d2f","Type":"ContainerDied","Data":"b9ec040462596f8e41b9cf8d0d933a258e10a9098a446bcfc5acee9ad17b6084"} Feb 18 00:56:36 crc kubenswrapper[4791]: I0218 00:56:36.017219 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b9ec040462596f8e41b9cf8d0d933a258e10a9098a446bcfc5acee9ad17b6084" Feb 18 00:56:36 crc kubenswrapper[4791]: I0218 00:56:36.017304 4791 util.go:48] 
"No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-8gppj" Feb 18 00:56:36 crc kubenswrapper[4791]: I0218 00:56:36.947832 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-77585f5f8c-96ptc" podUID="ea161630-d65f-4b00-bcf3-bc3822b3011e" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.167:5353: connect: connection refused" Feb 18 00:56:36 crc kubenswrapper[4791]: I0218 00:56:36.960992 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-8gppj"] Feb 18 00:56:36 crc kubenswrapper[4791]: I0218 00:56:36.977713 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-8gppj"] Feb 18 00:56:37 crc kubenswrapper[4791]: I0218 00:56:37.036765 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-pjx58"] Feb 18 00:56:37 crc kubenswrapper[4791]: E0218 00:56:37.037282 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa96a31f-114c-4c5b-a4d1-baa8364b2d2f" containerName="keystone-bootstrap" Feb 18 00:56:37 crc kubenswrapper[4791]: I0218 00:56:37.037301 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa96a31f-114c-4c5b-a4d1-baa8364b2d2f" containerName="keystone-bootstrap" Feb 18 00:56:37 crc kubenswrapper[4791]: I0218 00:56:37.037483 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa96a31f-114c-4c5b-a4d1-baa8364b2d2f" containerName="keystone-bootstrap" Feb 18 00:56:37 crc kubenswrapper[4791]: I0218 00:56:37.038186 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-pjx58" Feb 18 00:56:37 crc kubenswrapper[4791]: I0218 00:56:37.044234 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Feb 18 00:56:37 crc kubenswrapper[4791]: I0218 00:56:37.044270 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Feb 18 00:56:37 crc kubenswrapper[4791]: I0218 00:56:37.044621 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Feb 18 00:56:37 crc kubenswrapper[4791]: I0218 00:56:37.044745 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-49g64" Feb 18 00:56:37 crc kubenswrapper[4791]: I0218 00:56:37.044909 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Feb 18 00:56:37 crc kubenswrapper[4791]: I0218 00:56:37.081450 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6926bfc8-8aed-4893-912b-4969533ad0e5" path="/var/lib/kubelet/pods/6926bfc8-8aed-4893-912b-4969533ad0e5/volumes" Feb 18 00:56:37 crc kubenswrapper[4791]: I0218 00:56:37.082140 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa96a31f-114c-4c5b-a4d1-baa8364b2d2f" path="/var/lib/kubelet/pods/aa96a31f-114c-4c5b-a4d1-baa8364b2d2f/volumes" Feb 18 00:56:37 crc kubenswrapper[4791]: I0218 00:56:37.083332 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-pjx58"] Feb 18 00:56:37 crc kubenswrapper[4791]: I0218 00:56:37.156818 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/69958a35-9d4e-4e70-b21b-525ffef5d9da-fernet-keys\") pod \"keystone-bootstrap-pjx58\" (UID: \"69958a35-9d4e-4e70-b21b-525ffef5d9da\") " 
pod="openstack/keystone-bootstrap-pjx58" Feb 18 00:56:37 crc kubenswrapper[4791]: I0218 00:56:37.156912 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69958a35-9d4e-4e70-b21b-525ffef5d9da-combined-ca-bundle\") pod \"keystone-bootstrap-pjx58\" (UID: \"69958a35-9d4e-4e70-b21b-525ffef5d9da\") " pod="openstack/keystone-bootstrap-pjx58" Feb 18 00:56:37 crc kubenswrapper[4791]: I0218 00:56:37.156938 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/69958a35-9d4e-4e70-b21b-525ffef5d9da-credential-keys\") pod \"keystone-bootstrap-pjx58\" (UID: \"69958a35-9d4e-4e70-b21b-525ffef5d9da\") " pod="openstack/keystone-bootstrap-pjx58" Feb 18 00:56:37 crc kubenswrapper[4791]: I0218 00:56:37.156963 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/69958a35-9d4e-4e70-b21b-525ffef5d9da-scripts\") pod \"keystone-bootstrap-pjx58\" (UID: \"69958a35-9d4e-4e70-b21b-525ffef5d9da\") " pod="openstack/keystone-bootstrap-pjx58" Feb 18 00:56:37 crc kubenswrapper[4791]: I0218 00:56:37.157061 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xmg9k\" (UniqueName: \"kubernetes.io/projected/69958a35-9d4e-4e70-b21b-525ffef5d9da-kube-api-access-xmg9k\") pod \"keystone-bootstrap-pjx58\" (UID: \"69958a35-9d4e-4e70-b21b-525ffef5d9da\") " pod="openstack/keystone-bootstrap-pjx58" Feb 18 00:56:37 crc kubenswrapper[4791]: I0218 00:56:37.157219 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69958a35-9d4e-4e70-b21b-525ffef5d9da-config-data\") pod \"keystone-bootstrap-pjx58\" (UID: \"69958a35-9d4e-4e70-b21b-525ffef5d9da\") " pod="openstack/keystone-bootstrap-pjx58" Feb 18 00:56:37 crc kubenswrapper[4791]: I0218 00:56:37.259630 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/69958a35-9d4e-4e70-b21b-525ffef5d9da-fernet-keys\") pod \"keystone-bootstrap-pjx58\" (UID: \"69958a35-9d4e-4e70-b21b-525ffef5d9da\") " pod="openstack/keystone-bootstrap-pjx58" Feb 18 00:56:37 crc kubenswrapper[4791]: I0218 00:56:37.259690 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69958a35-9d4e-4e70-b21b-525ffef5d9da-combined-ca-bundle\") pod \"keystone-bootstrap-pjx58\" (UID: \"69958a35-9d4e-4e70-b21b-525ffef5d9da\") " pod="openstack/keystone-bootstrap-pjx58" Feb 18 00:56:37 crc kubenswrapper[4791]: I0218 00:56:37.259716 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/69958a35-9d4e-4e70-b21b-525ffef5d9da-credential-keys\") pod \"keystone-bootstrap-pjx58\" (UID: \"69958a35-9d4e-4e70-b21b-525ffef5d9da\") " pod="openstack/keystone-bootstrap-pjx58" Feb 18 00:56:37 crc kubenswrapper[4791]: I0218 00:56:37.259738 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/69958a35-9d4e-4e70-b21b-525ffef5d9da-scripts\") pod \"keystone-bootstrap-pjx58\" (UID: \"69958a35-9d4e-4e70-b21b-525ffef5d9da\") " pod="openstack/keystone-bootstrap-pjx58" Feb 18 00:56:37 crc 
kubenswrapper[4791]: I0218 00:56:37.259826 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xmg9k\" (UniqueName: \"kubernetes.io/projected/69958a35-9d4e-4e70-b21b-525ffef5d9da-kube-api-access-xmg9k\") pod \"keystone-bootstrap-pjx58\" (UID: \"69958a35-9d4e-4e70-b21b-525ffef5d9da\") " pod="openstack/keystone-bootstrap-pjx58" Feb 18 00:56:37 crc kubenswrapper[4791]: I0218 00:56:37.259882 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69958a35-9d4e-4e70-b21b-525ffef5d9da-config-data\") pod \"keystone-bootstrap-pjx58\" (UID: \"69958a35-9d4e-4e70-b21b-525ffef5d9da\") " pod="openstack/keystone-bootstrap-pjx58" Feb 18 00:56:37 crc kubenswrapper[4791]: I0218 00:56:37.264015 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69958a35-9d4e-4e70-b21b-525ffef5d9da-combined-ca-bundle\") pod \"keystone-bootstrap-pjx58\" (UID: \"69958a35-9d4e-4e70-b21b-525ffef5d9da\") " pod="openstack/keystone-bootstrap-pjx58" Feb 18 00:56:37 crc kubenswrapper[4791]: I0218 00:56:37.264323 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69958a35-9d4e-4e70-b21b-525ffef5d9da-config-data\") pod \"keystone-bootstrap-pjx58\" (UID: \"69958a35-9d4e-4e70-b21b-525ffef5d9da\") " pod="openstack/keystone-bootstrap-pjx58" Feb 18 00:56:37 crc kubenswrapper[4791]: I0218 00:56:37.265209 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/69958a35-9d4e-4e70-b21b-525ffef5d9da-scripts\") pod \"keystone-bootstrap-pjx58\" (UID: \"69958a35-9d4e-4e70-b21b-525ffef5d9da\") " pod="openstack/keystone-bootstrap-pjx58" Feb 18 00:56:37 crc kubenswrapper[4791]: I0218 00:56:37.266109 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/69958a35-9d4e-4e70-b21b-525ffef5d9da-credential-keys\") pod \"keystone-bootstrap-pjx58\" (UID: \"69958a35-9d4e-4e70-b21b-525ffef5d9da\") " pod="openstack/keystone-bootstrap-pjx58" Feb 18 00:56:37 crc kubenswrapper[4791]: I0218 00:56:37.270662 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/69958a35-9d4e-4e70-b21b-525ffef5d9da-fernet-keys\") pod \"keystone-bootstrap-pjx58\" (UID: \"69958a35-9d4e-4e70-b21b-525ffef5d9da\") " pod="openstack/keystone-bootstrap-pjx58" Feb 18 00:56:37 crc kubenswrapper[4791]: I0218 00:56:37.277093 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xmg9k\" (UniqueName: \"kubernetes.io/projected/69958a35-9d4e-4e70-b21b-525ffef5d9da-kube-api-access-xmg9k\") pod \"keystone-bootstrap-pjx58\" (UID: \"69958a35-9d4e-4e70-b21b-525ffef5d9da\") " pod="openstack/keystone-bootstrap-pjx58" Feb 18 00:56:37 crc kubenswrapper[4791]: I0218 00:56:37.368461 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-pjx58" Feb 18 00:56:39 crc kubenswrapper[4791]: I0218 00:56:39.281269 4791 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod6eb117b6-f49c-4bc7-a59e-50c32713d4a2"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod6eb117b6-f49c-4bc7-a59e-50c32713d4a2] : Timed out while waiting for systemd to remove kubepods-besteffort-pod6eb117b6_f49c_4bc7_a59e_50c32713d4a2.slice" Feb 18 00:56:41 crc kubenswrapper[4791]: I0218 00:56:41.947561 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-77585f5f8c-96ptc" podUID="ea161630-d65f-4b00-bcf3-bc3822b3011e" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.167:5353: connect: connection refused" Feb 18 00:56:43 crc kubenswrapper[4791]: E0218 00:56:43.586582 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified" Feb 18 00:56:43 crc kubenswrapper[4791]: E0218 00:56:43.587325 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mwp5g,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-lxpbs_openstack(5ddd4e16-7034-4925-8bba-2320640dd8b7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 18 00:56:43 crc kubenswrapper[4791]: E0218 00:56:43.588493 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-lxpbs" podUID="5ddd4e16-7034-4925-8bba-2320640dd8b7" Feb 18 00:56:44 crc kubenswrapper[4791]: E0218 00:56:44.123797 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-lxpbs" podUID="5ddd4e16-7034-4925-8bba-2320640dd8b7" Feb 18 00:56:45 crc kubenswrapper[4791]: I0218 00:56:45.135329 4791 generic.go:334] "Generic (PLEG): container finished" podID="fca332cf-0111-45ef-b20f-726928d11b0b" containerID="7f3f56d0fce90539d28bc79714b4b1825db0ffd6b764b7fc59289c2c1e950c2c" exitCode=0 Feb 18 00:56:45 crc kubenswrapper[4791]: I0218 00:56:45.135403 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-ghjnw" event={"ID":"fca332cf-0111-45ef-b20f-726928d11b0b","Type":"ContainerDied","Data":"7f3f56d0fce90539d28bc79714b4b1825db0ffd6b764b7fc59289c2c1e950c2c"} Feb 18 00:56:51 crc kubenswrapper[4791]: I0218 00:56:51.947328 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-77585f5f8c-96ptc" podUID="ea161630-d65f-4b00-bcf3-bc3822b3011e" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.167:5353: i/o timeout" Feb 18 00:56:51 crc kubenswrapper[4791]: I0218 00:56:51.948072 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-77585f5f8c-96ptc" Feb 18 00:56:53 crc kubenswrapper[4791]: E0218 00:56:53.507111 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified" Feb 18 00:56:53 crc kubenswrapper[4791]: E0218 00:56:53.507580 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-j8kbn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL 
MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-g5ccz_openstack(4f39e5ec-0b51-4c0e-9a95-95c3e69163b8): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 18 00:56:53 crc kubenswrapper[4791]: E0218 00:56:53.508794 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/heat-db-sync-g5ccz" podUID="4f39e5ec-0b51-4c0e-9a95-95c3e69163b8" Feb 18 00:56:53 crc kubenswrapper[4791]: I0218 00:56:53.535646 4791 scope.go:117] "RemoveContainer" containerID="4a0a015dbab52db683c4b361d4f9d3858a9a78ad00e2dd479f55e609d6bf2e6b" Feb 18 00:56:53 crc kubenswrapper[4791]: I0218 00:56:53.653998 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77585f5f8c-96ptc" Feb 18 00:56:53 crc kubenswrapper[4791]: I0218 00:56:53.661778 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-ghjnw" Feb 18 00:56:53 crc kubenswrapper[4791]: I0218 00:56:53.796200 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bh6jx\" (UniqueName: \"kubernetes.io/projected/ea161630-d65f-4b00-bcf3-bc3822b3011e-kube-api-access-bh6jx\") pod \"ea161630-d65f-4b00-bcf3-bc3822b3011e\" (UID: \"ea161630-d65f-4b00-bcf3-bc3822b3011e\") " Feb 18 00:56:53 crc kubenswrapper[4791]: I0218 00:56:53.796260 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea161630-d65f-4b00-bcf3-bc3822b3011e-config\") pod \"ea161630-d65f-4b00-bcf3-bc3822b3011e\" (UID: \"ea161630-d65f-4b00-bcf3-bc3822b3011e\") " Feb 18 00:56:53 crc kubenswrapper[4791]: I0218 00:56:53.796342 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ea161630-d65f-4b00-bcf3-bc3822b3011e-ovsdbserver-sb\") pod \"ea161630-d65f-4b00-bcf3-bc3822b3011e\" (UID: \"ea161630-d65f-4b00-bcf3-bc3822b3011e\") " Feb 18 00:56:53 crc kubenswrapper[4791]: I0218 00:56:53.796450 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gfhhq\" (UniqueName: \"kubernetes.io/projected/fca332cf-0111-45ef-b20f-726928d11b0b-kube-api-access-gfhhq\") pod \"fca332cf-0111-45ef-b20f-726928d11b0b\" (UID: \"fca332cf-0111-45ef-b20f-726928d11b0b\") " Feb 18 00:56:53 crc kubenswrapper[4791]: I0218 00:56:53.796476 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fca332cf-0111-45ef-b20f-726928d11b0b-combined-ca-bundle\") pod \"fca332cf-0111-45ef-b20f-726928d11b0b\" (UID: \"fca332cf-0111-45ef-b20f-726928d11b0b\") " Feb 18 00:56:53 crc kubenswrapper[4791]: I0218 00:56:53.796502 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/ea161630-d65f-4b00-bcf3-bc3822b3011e-dns-swift-storage-0\") pod \"ea161630-d65f-4b00-bcf3-bc3822b3011e\" (UID: \"ea161630-d65f-4b00-bcf3-bc3822b3011e\") " Feb 18 00:56:53 crc kubenswrapper[4791]: I0218 00:56:53.796529 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/fca332cf-0111-45ef-b20f-726928d11b0b-config\") pod \"fca332cf-0111-45ef-b20f-726928d11b0b\" (UID: \"fca332cf-0111-45ef-b20f-726928d11b0b\") " Feb 18 00:56:53 crc kubenswrapper[4791]: I0218 00:56:53.796575 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ea161630-d65f-4b00-bcf3-bc3822b3011e-ovsdbserver-nb\") pod \"ea161630-d65f-4b00-bcf3-bc3822b3011e\" (UID: \"ea161630-d65f-4b00-bcf3-bc3822b3011e\") " Feb 18 00:56:53 crc kubenswrapper[4791]: I0218 00:56:53.796631 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ea161630-d65f-4b00-bcf3-bc3822b3011e-dns-svc\") pod \"ea161630-d65f-4b00-bcf3-bc3822b3011e\" (UID: \"ea161630-d65f-4b00-bcf3-bc3822b3011e\") " Feb 18 00:56:53 crc kubenswrapper[4791]: I0218 00:56:53.801002 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ea161630-d65f-4b00-bcf3-bc3822b3011e-kube-api-access-bh6jx" (OuterVolumeSpecName: "kube-api-access-bh6jx") pod "ea161630-d65f-4b00-bcf3-bc3822b3011e" (UID: "ea161630-d65f-4b00-bcf3-bc3822b3011e"). InnerVolumeSpecName "kube-api-access-bh6jx". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:56:53 crc kubenswrapper[4791]: I0218 00:56:53.801521 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fca332cf-0111-45ef-b20f-726928d11b0b-kube-api-access-gfhhq" (OuterVolumeSpecName: "kube-api-access-gfhhq") pod "fca332cf-0111-45ef-b20f-726928d11b0b" (UID: "fca332cf-0111-45ef-b20f-726928d11b0b"). InnerVolumeSpecName "kube-api-access-gfhhq". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:56:53 crc kubenswrapper[4791]: I0218 00:56:53.833569 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fca332cf-0111-45ef-b20f-726928d11b0b-config" (OuterVolumeSpecName: "config") pod "fca332cf-0111-45ef-b20f-726928d11b0b" (UID: "fca332cf-0111-45ef-b20f-726928d11b0b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:56:53 crc kubenswrapper[4791]: I0218 00:56:53.837113 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fca332cf-0111-45ef-b20f-726928d11b0b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fca332cf-0111-45ef-b20f-726928d11b0b" (UID: "fca332cf-0111-45ef-b20f-726928d11b0b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:56:53 crc kubenswrapper[4791]: I0218 00:56:53.850049 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea161630-d65f-4b00-bcf3-bc3822b3011e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ea161630-d65f-4b00-bcf3-bc3822b3011e" (UID: "ea161630-d65f-4b00-bcf3-bc3822b3011e"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:56:53 crc kubenswrapper[4791]: I0218 00:56:53.860663 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea161630-d65f-4b00-bcf3-bc3822b3011e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ea161630-d65f-4b00-bcf3-bc3822b3011e" (UID: "ea161630-d65f-4b00-bcf3-bc3822b3011e"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:56:53 crc kubenswrapper[4791]: I0218 00:56:53.864352 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea161630-d65f-4b00-bcf3-bc3822b3011e-config" (OuterVolumeSpecName: "config") pod "ea161630-d65f-4b00-bcf3-bc3822b3011e" (UID: "ea161630-d65f-4b00-bcf3-bc3822b3011e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:56:53 crc kubenswrapper[4791]: I0218 00:56:53.864849 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea161630-d65f-4b00-bcf3-bc3822b3011e-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "ea161630-d65f-4b00-bcf3-bc3822b3011e" (UID: "ea161630-d65f-4b00-bcf3-bc3822b3011e"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:56:53 crc kubenswrapper[4791]: I0218 00:56:53.869826 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea161630-d65f-4b00-bcf3-bc3822b3011e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ea161630-d65f-4b00-bcf3-bc3822b3011e" (UID: "ea161630-d65f-4b00-bcf3-bc3822b3011e"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:56:53 crc kubenswrapper[4791]: I0218 00:56:53.900191 4791 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ea161630-d65f-4b00-bcf3-bc3822b3011e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:53 crc kubenswrapper[4791]: I0218 00:56:53.900228 4791 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ea161630-d65f-4b00-bcf3-bc3822b3011e-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:53 crc kubenswrapper[4791]: I0218 00:56:53.900239 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bh6jx\" (UniqueName: \"kubernetes.io/projected/ea161630-d65f-4b00-bcf3-bc3822b3011e-kube-api-access-bh6jx\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:53 crc kubenswrapper[4791]: I0218 00:56:53.900294 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea161630-d65f-4b00-bcf3-bc3822b3011e-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:53 crc kubenswrapper[4791]: I0218 00:56:53.900333 4791 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ea161630-d65f-4b00-bcf3-bc3822b3011e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:53 crc kubenswrapper[4791]: I0218 00:56:53.900350 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gfhhq\" (UniqueName: \"kubernetes.io/projected/fca332cf-0111-45ef-b20f-726928d11b0b-kube-api-access-gfhhq\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:53 crc kubenswrapper[4791]: I0218 00:56:53.900360 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/fca332cf-0111-45ef-b20f-726928d11b0b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:53 crc kubenswrapper[4791]: I0218 00:56:53.900368 4791 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ea161630-d65f-4b00-bcf3-bc3822b3011e-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:53 crc kubenswrapper[4791]: I0218 00:56:53.900376 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/fca332cf-0111-45ef-b20f-726928d11b0b-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:56:54 crc kubenswrapper[4791]: I0218 00:56:54.240605 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-ghjnw" event={"ID":"fca332cf-0111-45ef-b20f-726928d11b0b","Type":"ContainerDied","Data":"359e68f38c88ce7476ee8964b1b8cc94a081c9d8e63cd441e32b0f8f2e06039c"} Feb 18 00:56:54 crc kubenswrapper[4791]: I0218 00:56:54.241000 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="359e68f38c88ce7476ee8964b1b8cc94a081c9d8e63cd441e32b0f8f2e06039c" Feb 18 00:56:54 crc kubenswrapper[4791]: I0218 00:56:54.240902 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-ghjnw" Feb 18 00:56:54 crc kubenswrapper[4791]: I0218 00:56:54.244023 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-96ptc" event={"ID":"ea161630-d65f-4b00-bcf3-bc3822b3011e","Type":"ContainerDied","Data":"83b5a5762e3c069337f5cbbb5292d858e10f0440f48b60a33da6159c4e30c8b5"} Feb 18 00:56:54 crc kubenswrapper[4791]: I0218 00:56:54.244179 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-77585f5f8c-96ptc" Feb 18 00:56:54 crc kubenswrapper[4791]: E0218 00:56:54.246474 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified\\\"\"" pod="openstack/heat-db-sync-g5ccz" podUID="4f39e5ec-0b51-4c0e-9a95-95c3e69163b8" Feb 18 00:56:54 crc kubenswrapper[4791]: I0218 00:56:54.289210 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-96ptc"] Feb 18 00:56:54 crc kubenswrapper[4791]: I0218 00:56:54.298987 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-96ptc"] Feb 18 00:56:54 crc kubenswrapper[4791]: E0218 00:56:54.898281 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Feb 18 00:56:54 crc kubenswrapper[4791]: E0218 00:56:54.898743 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4zbxc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} 
start failed in pod cinder-db-sync-k7jrj_openstack(e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Feb 18 00:56:54 crc kubenswrapper[4791]: E0218 00:56:54.900036 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-k7jrj" podUID="e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0" Feb 18 00:56:54 crc kubenswrapper[4791]: I0218 00:56:54.945244 4791 scope.go:117] "RemoveContainer" containerID="4f97989c772a75d4bade621ac6510ec4aec04948a96bed8459c1b6f8721cf86b" Feb 18 00:56:54 crc kubenswrapper[4791]: I0218 00:56:54.962787 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-84b966f6c9-9s7nn"] Feb 18 00:56:54 crc kubenswrapper[4791]: E0218 00:56:54.963696 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea161630-d65f-4b00-bcf3-bc3822b3011e" containerName="dnsmasq-dns" Feb 18 00:56:54 crc kubenswrapper[4791]: I0218 00:56:54.963712 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea161630-d65f-4b00-bcf3-bc3822b3011e" containerName="dnsmasq-dns" Feb 18 00:56:54 crc kubenswrapper[4791]: E0218 00:56:54.963737 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea161630-d65f-4b00-bcf3-bc3822b3011e" containerName="init" Feb 18 00:56:54 crc kubenswrapper[4791]: I0218 00:56:54.963743 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea161630-d65f-4b00-bcf3-bc3822b3011e" containerName="init" Feb 18 00:56:54 crc kubenswrapper[4791]: E0218 00:56:54.963763 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fca332cf-0111-45ef-b20f-726928d11b0b" containerName="neutron-db-sync" Feb 18 00:56:54 crc kubenswrapper[4791]: I0218 00:56:54.963770 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="fca332cf-0111-45ef-b20f-726928d11b0b" containerName="neutron-db-sync" Feb 18 00:56:54 crc kubenswrapper[4791]: I0218 00:56:54.964262 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="fca332cf-0111-45ef-b20f-726928d11b0b" containerName="neutron-db-sync" Feb 18 00:56:54 crc kubenswrapper[4791]: I0218 00:56:54.964287 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea161630-d65f-4b00-bcf3-bc3822b3011e" containerName="dnsmasq-dns" Feb 18 00:56:54 crc kubenswrapper[4791]: I0218 00:56:54.969234 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-84b966f6c9-9s7nn" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:54.999421 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-84b966f6c9-9s7nn"] Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.121837 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ea161630-d65f-4b00-bcf3-bc3822b3011e" path="/var/lib/kubelet/pods/ea161630-d65f-4b00-bcf3-bc3822b3011e/volumes" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.129670 4791 scope.go:117] "RemoveContainer" containerID="5628ffb573bed69506e0d0772cf926dac0536ca60491aab5833ed84ddc153191" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.150578 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-8694564946-rt9m5"] Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.151831 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f76d819d-a178-42b5-bc1d-642c8684a05b-dns-svc\") pod \"dnsmasq-dns-84b966f6c9-9s7nn\" (UID: \"f76d819d-a178-42b5-bc1d-642c8684a05b\") " pod="openstack/dnsmasq-dns-84b966f6c9-9s7nn" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.151885 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f76d819d-a178-42b5-bc1d-642c8684a05b-config\") pod \"dnsmasq-dns-84b966f6c9-9s7nn\" (UID: \"f76d819d-a178-42b5-bc1d-642c8684a05b\") " pod="openstack/dnsmasq-dns-84b966f6c9-9s7nn" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.152006 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f76d819d-a178-42b5-bc1d-642c8684a05b-ovsdbserver-sb\") pod \"dnsmasq-dns-84b966f6c9-9s7nn\" (UID: \"f76d819d-a178-42b5-bc1d-642c8684a05b\") " pod="openstack/dnsmasq-dns-84b966f6c9-9s7nn" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.152080 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f76d819d-a178-42b5-bc1d-642c8684a05b-ovsdbserver-nb\") pod \"dnsmasq-dns-84b966f6c9-9s7nn\" (UID: \"f76d819d-a178-42b5-bc1d-642c8684a05b\") " pod="openstack/dnsmasq-dns-84b966f6c9-9s7nn" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.152112 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xq25c\" (UniqueName: \"kubernetes.io/projected/f76d819d-a178-42b5-bc1d-642c8684a05b-kube-api-access-xq25c\") pod \"dnsmasq-dns-84b966f6c9-9s7nn\" (UID: \"f76d819d-a178-42b5-bc1d-642c8684a05b\") " pod="openstack/dnsmasq-dns-84b966f6c9-9s7nn" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.152305 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f76d819d-a178-42b5-bc1d-642c8684a05b-dns-swift-storage-0\") pod \"dnsmasq-dns-84b966f6c9-9s7nn\" (UID: \"f76d819d-a178-42b5-bc1d-642c8684a05b\") " pod="openstack/dnsmasq-dns-84b966f6c9-9s7nn" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.156580 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-8694564946-rt9m5" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.161572 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.161838 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-486xb" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.161891 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.161970 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.189363 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-8694564946-rt9m5"] Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.222568 4791 scope.go:117] "RemoveContainer" containerID="0bb2b7ee20a8bb64f7435a146b058241798837a866812c67cea49c071f19184c" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.256718 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f76d819d-a178-42b5-bc1d-642c8684a05b-dns-svc\") pod \"dnsmasq-dns-84b966f6c9-9s7nn\" (UID: \"f76d819d-a178-42b5-bc1d-642c8684a05b\") " pod="openstack/dnsmasq-dns-84b966f6c9-9s7nn" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.256805 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f76d819d-a178-42b5-bc1d-642c8684a05b-config\") pod \"dnsmasq-dns-84b966f6c9-9s7nn\" (UID: \"f76d819d-a178-42b5-bc1d-642c8684a05b\") " pod="openstack/dnsmasq-dns-84b966f6c9-9s7nn" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.256960 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7c4f2d33-853b-4b26-9050-a95f8a0aacff-config\") pod \"neutron-8694564946-rt9m5\" (UID: \"7c4f2d33-853b-4b26-9050-a95f8a0aacff\") " pod="openstack/neutron-8694564946-rt9m5" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.257688 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c4f2d33-853b-4b26-9050-a95f8a0aacff-combined-ca-bundle\") pod \"neutron-8694564946-rt9m5\" (UID: \"7c4f2d33-853b-4b26-9050-a95f8a0aacff\") " pod="openstack/neutron-8694564946-rt9m5" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.257777 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f76d819d-a178-42b5-bc1d-642c8684a05b-ovsdbserver-sb\") pod \"dnsmasq-dns-84b966f6c9-9s7nn\" (UID: \"f76d819d-a178-42b5-bc1d-642c8684a05b\") " pod="openstack/dnsmasq-dns-84b966f6c9-9s7nn" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.257827 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cvjwb\" (UniqueName: \"kubernetes.io/projected/7c4f2d33-853b-4b26-9050-a95f8a0aacff-kube-api-access-cvjwb\") pod \"neutron-8694564946-rt9m5\" (UID: \"7c4f2d33-853b-4b26-9050-a95f8a0aacff\") " pod="openstack/neutron-8694564946-rt9m5" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.257899 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f76d819d-a178-42b5-bc1d-642c8684a05b-ovsdbserver-nb\") pod \"dnsmasq-dns-84b966f6c9-9s7nn\" (UID: \"f76d819d-a178-42b5-bc1d-642c8684a05b\") " pod="openstack/dnsmasq-dns-84b966f6c9-9s7nn" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.257996 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7c4f2d33-853b-4b26-9050-a95f8a0aacff-httpd-config\") pod \"neutron-8694564946-rt9m5\" (UID: \"7c4f2d33-853b-4b26-9050-a95f8a0aacff\") " pod="openstack/neutron-8694564946-rt9m5" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.258067 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xq25c\" (UniqueName: \"kubernetes.io/projected/f76d819d-a178-42b5-bc1d-642c8684a05b-kube-api-access-xq25c\") pod \"dnsmasq-dns-84b966f6c9-9s7nn\" (UID: \"f76d819d-a178-42b5-bc1d-642c8684a05b\") " pod="openstack/dnsmasq-dns-84b966f6c9-9s7nn" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.258134 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7c4f2d33-853b-4b26-9050-a95f8a0aacff-ovndb-tls-certs\") pod \"neutron-8694564946-rt9m5\" (UID: \"7c4f2d33-853b-4b26-9050-a95f8a0aacff\") " pod="openstack/neutron-8694564946-rt9m5" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.258174 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f76d819d-a178-42b5-bc1d-642c8684a05b-dns-swift-storage-0\") pod \"dnsmasq-dns-84b966f6c9-9s7nn\" (UID: \"f76d819d-a178-42b5-bc1d-642c8684a05b\") " pod="openstack/dnsmasq-dns-84b966f6c9-9s7nn" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.258628 4791 scope.go:117] "RemoveContainer" containerID="8b9b2bcf2e3a619792c6167a262a64ea9092ccd1474b11c988188fdb50bf7de2" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.258807 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f76d819d-a178-42b5-bc1d-642c8684a05b-config\") pod \"dnsmasq-dns-84b966f6c9-9s7nn\" (UID: \"f76d819d-a178-42b5-bc1d-642c8684a05b\") " pod="openstack/dnsmasq-dns-84b966f6c9-9s7nn" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.259971 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f76d819d-a178-42b5-bc1d-642c8684a05b-dns-svc\") pod \"dnsmasq-dns-84b966f6c9-9s7nn\" (UID: \"f76d819d-a178-42b5-bc1d-642c8684a05b\") " pod="openstack/dnsmasq-dns-84b966f6c9-9s7nn" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.260967 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f76d819d-a178-42b5-bc1d-642c8684a05b-dns-swift-storage-0\") pod \"dnsmasq-dns-84b966f6c9-9s7nn\" (UID: \"f76d819d-a178-42b5-bc1d-642c8684a05b\") " pod="openstack/dnsmasq-dns-84b966f6c9-9s7nn" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.261718 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f76d819d-a178-42b5-bc1d-642c8684a05b-ovsdbserver-nb\") pod \"dnsmasq-dns-84b966f6c9-9s7nn\" (UID: \"f76d819d-a178-42b5-bc1d-642c8684a05b\") " pod="openstack/dnsmasq-dns-84b966f6c9-9s7nn" Feb 18 00:56:55 crc 
kubenswrapper[4791]: I0218 00:56:55.268562 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f76d819d-a178-42b5-bc1d-642c8684a05b-ovsdbserver-sb\") pod \"dnsmasq-dns-84b966f6c9-9s7nn\" (UID: \"f76d819d-a178-42b5-bc1d-642c8684a05b\") " pod="openstack/dnsmasq-dns-84b966f6c9-9s7nn" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.290898 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xq25c\" (UniqueName: \"kubernetes.io/projected/f76d819d-a178-42b5-bc1d-642c8684a05b-kube-api-access-xq25c\") pod \"dnsmasq-dns-84b966f6c9-9s7nn\" (UID: \"f76d819d-a178-42b5-bc1d-642c8684a05b\") " pod="openstack/dnsmasq-dns-84b966f6c9-9s7nn" Feb 18 00:56:55 crc kubenswrapper[4791]: E0218 00:56:55.304215 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-k7jrj" podUID="e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.310675 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84b966f6c9-9s7nn" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.367294 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7c4f2d33-853b-4b26-9050-a95f8a0aacff-config\") pod \"neutron-8694564946-rt9m5\" (UID: \"7c4f2d33-853b-4b26-9050-a95f8a0aacff\") " pod="openstack/neutron-8694564946-rt9m5" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.367464 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c4f2d33-853b-4b26-9050-a95f8a0aacff-combined-ca-bundle\") pod \"neutron-8694564946-rt9m5\" (UID: \"7c4f2d33-853b-4b26-9050-a95f8a0aacff\") " pod="openstack/neutron-8694564946-rt9m5" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.367549 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cvjwb\" (UniqueName: \"kubernetes.io/projected/7c4f2d33-853b-4b26-9050-a95f8a0aacff-kube-api-access-cvjwb\") pod \"neutron-8694564946-rt9m5\" (UID: \"7c4f2d33-853b-4b26-9050-a95f8a0aacff\") " pod="openstack/neutron-8694564946-rt9m5" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.367613 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7c4f2d33-853b-4b26-9050-a95f8a0aacff-httpd-config\") pod \"neutron-8694564946-rt9m5\" (UID: \"7c4f2d33-853b-4b26-9050-a95f8a0aacff\") " pod="openstack/neutron-8694564946-rt9m5" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.367682 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7c4f2d33-853b-4b26-9050-a95f8a0aacff-ovndb-tls-certs\") pod \"neutron-8694564946-rt9m5\" (UID: \"7c4f2d33-853b-4b26-9050-a95f8a0aacff\") " pod="openstack/neutron-8694564946-rt9m5" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.371720 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7c4f2d33-853b-4b26-9050-a95f8a0aacff-ovndb-tls-certs\") pod \"neutron-8694564946-rt9m5\" (UID: 
\"7c4f2d33-853b-4b26-9050-a95f8a0aacff\") " pod="openstack/neutron-8694564946-rt9m5" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.379861 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7c4f2d33-853b-4b26-9050-a95f8a0aacff-httpd-config\") pod \"neutron-8694564946-rt9m5\" (UID: \"7c4f2d33-853b-4b26-9050-a95f8a0aacff\") " pod="openstack/neutron-8694564946-rt9m5" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.389853 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/7c4f2d33-853b-4b26-9050-a95f8a0aacff-config\") pod \"neutron-8694564946-rt9m5\" (UID: \"7c4f2d33-853b-4b26-9050-a95f8a0aacff\") " pod="openstack/neutron-8694564946-rt9m5" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.393073 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c4f2d33-853b-4b26-9050-a95f8a0aacff-combined-ca-bundle\") pod \"neutron-8694564946-rt9m5\" (UID: \"7c4f2d33-853b-4b26-9050-a95f8a0aacff\") " pod="openstack/neutron-8694564946-rt9m5" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.470505 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cvjwb\" (UniqueName: \"kubernetes.io/projected/7c4f2d33-853b-4b26-9050-a95f8a0aacff-kube-api-access-cvjwb\") pod \"neutron-8694564946-rt9m5\" (UID: \"7c4f2d33-853b-4b26-9050-a95f8a0aacff\") " pod="openstack/neutron-8694564946-rt9m5" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.531427 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8694564946-rt9m5" Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.714725 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.860601 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 18 00:56:55 crc kubenswrapper[4791]: I0218 00:56:55.932320 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-pjx58"] Feb 18 00:56:56 crc kubenswrapper[4791]: I0218 00:56:56.157573 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-84b966f6c9-9s7nn"] Feb 18 00:56:56 crc kubenswrapper[4791]: I0218 00:56:56.320057 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-pjx58" event={"ID":"69958a35-9d4e-4e70-b21b-525ffef5d9da","Type":"ContainerStarted","Data":"7921f3424b7c06552782a6fa857761505524e5ef046f5f6e9d2c12d8ad2c9bbe"} Feb 18 00:56:56 crc kubenswrapper[4791]: I0218 00:56:56.332213 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-ld9m6" event={"ID":"082acdfd-08b8-4986-8091-22d29ab897f3","Type":"ContainerStarted","Data":"402e56035ac323209b0d643883d293c8bf2dae4d527683ce023127f2618db865"} Feb 18 00:56:56 crc kubenswrapper[4791]: I0218 00:56:56.355405 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9d59b5e1-a833-4da4-ad0e-6315320cefbc","Type":"ContainerStarted","Data":"3889b552ff289a28e5e4cc12420115972d2ee078f45b58a477b98b82f546f9ba"} Feb 18 00:56:56 crc kubenswrapper[4791]: I0218 00:56:56.360756 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" 
event={"ID":"718be5cf-bdd1-4639-9fc5-343a770f1244","Type":"ContainerStarted","Data":"13804206f29b8a896baf9d18d433f189c264418eb140b0f9390c85c4208f93cf"} Feb 18 00:56:56 crc kubenswrapper[4791]: I0218 00:56:56.364127 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"65d7529c-6130-4f99-9212-d13950d08fd5","Type":"ContainerStarted","Data":"c10fa45ca056ac80520502873bc5c6375a6d6047634d7f5200e31cc5fd512f48"} Feb 18 00:56:56 crc kubenswrapper[4791]: I0218 00:56:56.364736 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-ld9m6" podStartSLOduration=5.207341967 podStartE2EDuration="34.364725425s" podCreationTimestamp="2026-02-18 00:56:22 +0000 UTC" firstStartedPulling="2026-02-18 00:56:24.35773792 +0000 UTC m=+1325.925751090" lastFinishedPulling="2026-02-18 00:56:53.515121378 +0000 UTC m=+1355.083134548" observedRunningTime="2026-02-18 00:56:56.348803461 +0000 UTC m=+1357.916816631" watchObservedRunningTime="2026-02-18 00:56:56.364725425 +0000 UTC m=+1357.932738595" Feb 18 00:56:56 crc kubenswrapper[4791]: I0218 00:56:56.381037 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"6658e3ff-2b3b-476c-8638-a5b3d94005d4","Type":"ContainerStarted","Data":"843da55a93b3559748fea650ae2079509b642b3e6a838eaf5d7d436c03cafc4c"} Feb 18 00:56:56 crc kubenswrapper[4791]: I0218 00:56:56.386607 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-8694564946-rt9m5"] Feb 18 00:56:56 crc kubenswrapper[4791]: I0218 00:56:56.426722 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84b966f6c9-9s7nn" event={"ID":"f76d819d-a178-42b5-bc1d-642c8684a05b","Type":"ContainerStarted","Data":"58ef14401bbb084b9037696844f75ff4049a7dbebd3006c5e3235257aa7571e0"} Feb 18 00:56:56 crc kubenswrapper[4791]: I0218 00:56:56.439274 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=41.439257864 podStartE2EDuration="41.439257864s" podCreationTimestamp="2026-02-18 00:56:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:56:56.406479069 +0000 UTC m=+1357.974492239" watchObservedRunningTime="2026-02-18 00:56:56.439257864 +0000 UTC m=+1358.007271034" Feb 18 00:56:56 crc kubenswrapper[4791]: W0218 00:56:56.461775 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7c4f2d33_853b_4b26_9050_a95f8a0aacff.slice/crio-a604e5c5f0f3ffae4286698e7913c6b9f92fd1bd495cbff2723c520b5823d683 WatchSource:0}: Error finding container a604e5c5f0f3ffae4286698e7913c6b9f92fd1bd495cbff2723c520b5823d683: Status 404 returned error can't find the container with id a604e5c5f0f3ffae4286698e7913c6b9f92fd1bd495cbff2723c520b5823d683 Feb 18 00:56:56 crc kubenswrapper[4791]: I0218 00:56:56.948224 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-77585f5f8c-96ptc" podUID="ea161630-d65f-4b00-bcf3-bc3822b3011e" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.167:5353: i/o timeout" Feb 18 00:56:57 crc kubenswrapper[4791]: I0218 00:56:57.460555 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" 
event={"ID":"718be5cf-bdd1-4639-9fc5-343a770f1244","Type":"ContainerStarted","Data":"8a5fe3982a7495bb52009a917224a3e8e43967c5bfa0a135339fe3e84e58258b"} Feb 18 00:56:57 crc kubenswrapper[4791]: I0218 00:56:57.464414 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8694564946-rt9m5" event={"ID":"7c4f2d33-853b-4b26-9050-a95f8a0aacff","Type":"ContainerStarted","Data":"274b8e99cc69b5607a27e5b87eed51926d9c1ac4a2fed3e8f3527894e8ab7f5f"} Feb 18 00:56:57 crc kubenswrapper[4791]: I0218 00:56:57.464465 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8694564946-rt9m5" event={"ID":"7c4f2d33-853b-4b26-9050-a95f8a0aacff","Type":"ContainerStarted","Data":"a604e5c5f0f3ffae4286698e7913c6b9f92fd1bd495cbff2723c520b5823d683"} Feb 18 00:56:57 crc kubenswrapper[4791]: I0218 00:56:57.465524 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-8694564946-rt9m5" Feb 18 00:56:57 crc kubenswrapper[4791]: I0218 00:56:57.483985 4791 generic.go:334] "Generic (PLEG): container finished" podID="f76d819d-a178-42b5-bc1d-642c8684a05b" containerID="d384decf40d59f4ba2dec8e75acfc449cf8a5c5dd621dd567534d6975d54d198" exitCode=0 Feb 18 00:56:57 crc kubenswrapper[4791]: I0218 00:56:57.485026 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84b966f6c9-9s7nn" event={"ID":"f76d819d-a178-42b5-bc1d-642c8684a05b","Type":"ContainerDied","Data":"d384decf40d59f4ba2dec8e75acfc449cf8a5c5dd621dd567534d6975d54d198"} Feb 18 00:56:57 crc kubenswrapper[4791]: I0218 00:56:57.488891 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-pjx58" event={"ID":"69958a35-9d4e-4e70-b21b-525ffef5d9da","Type":"ContainerStarted","Data":"d1179326184c0774c035050ce21c7a60bc5d040a13893bf2844e7608b33a0205"} Feb 18 00:56:57 crc kubenswrapper[4791]: I0218 00:56:57.503668 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9d59b5e1-a833-4da4-ad0e-6315320cefbc","Type":"ContainerStarted","Data":"a2fb9d3f2721062df8346d68d8ee9af9d4ba452dfc9e2048ab3e38c7a86ecb4b"} Feb 18 00:56:57 crc kubenswrapper[4791]: I0218 00:56:57.516737 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-8694564946-rt9m5" podStartSLOduration=2.516700093 podStartE2EDuration="2.516700093s" podCreationTimestamp="2026-02-18 00:56:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:56:57.484705322 +0000 UTC m=+1359.052718492" watchObservedRunningTime="2026-02-18 00:56:57.516700093 +0000 UTC m=+1359.084713253" Feb 18 00:56:57 crc kubenswrapper[4791]: I0218 00:56:57.601179 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-pjx58" podStartSLOduration=21.60114076 podStartE2EDuration="21.60114076s" podCreationTimestamp="2026-02-18 00:56:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:56:57.542489793 +0000 UTC m=+1359.110502963" watchObservedRunningTime="2026-02-18 00:56:57.60114076 +0000 UTC m=+1359.169153950" Feb 18 00:56:58 crc kubenswrapper[4791]: I0218 00:56:58.147583 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5ff469dfbc-w7krb"] Feb 18 00:56:58 crc kubenswrapper[4791]: I0218 00:56:58.150844 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5ff469dfbc-w7krb" Feb 18 00:56:58 crc kubenswrapper[4791]: I0218 00:56:58.153333 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Feb 18 00:56:58 crc kubenswrapper[4791]: I0218 00:56:58.154325 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Feb 18 00:56:58 crc kubenswrapper[4791]: I0218 00:56:58.188518 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5ff469dfbc-w7krb"] Feb 18 00:56:58 crc kubenswrapper[4791]: I0218 00:56:58.329623 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-ovndb-tls-certs\") pod \"neutron-5ff469dfbc-w7krb\" (UID: \"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352\") " pod="openstack/neutron-5ff469dfbc-w7krb" Feb 18 00:56:58 crc kubenswrapper[4791]: I0218 00:56:58.329766 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-public-tls-certs\") pod \"neutron-5ff469dfbc-w7krb\" (UID: \"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352\") " pod="openstack/neutron-5ff469dfbc-w7krb" Feb 18 00:56:58 crc kubenswrapper[4791]: I0218 00:56:58.329918 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-config\") pod \"neutron-5ff469dfbc-w7krb\" (UID: \"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352\") " pod="openstack/neutron-5ff469dfbc-w7krb" Feb 18 00:56:58 crc kubenswrapper[4791]: I0218 00:56:58.329949 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-internal-tls-certs\") pod \"neutron-5ff469dfbc-w7krb\" (UID: \"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352\") " pod="openstack/neutron-5ff469dfbc-w7krb" Feb 18 00:56:58 crc kubenswrapper[4791]: I0218 00:56:58.330023 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-combined-ca-bundle\") pod \"neutron-5ff469dfbc-w7krb\" (UID: \"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352\") " pod="openstack/neutron-5ff469dfbc-w7krb" Feb 18 00:56:58 crc kubenswrapper[4791]: I0218 00:56:58.330054 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-httpd-config\") pod \"neutron-5ff469dfbc-w7krb\" (UID: \"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352\") " pod="openstack/neutron-5ff469dfbc-w7krb" Feb 18 00:56:58 crc kubenswrapper[4791]: I0218 00:56:58.330085 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tfqvp\" (UniqueName: \"kubernetes.io/projected/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-kube-api-access-tfqvp\") pod \"neutron-5ff469dfbc-w7krb\" (UID: \"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352\") " pod="openstack/neutron-5ff469dfbc-w7krb" Feb 18 00:56:58 crc kubenswrapper[4791]: I0218 00:56:58.431961 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/secret/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-config\") pod \"neutron-5ff469dfbc-w7krb\" (UID: \"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352\") " pod="openstack/neutron-5ff469dfbc-w7krb" Feb 18 00:56:58 crc kubenswrapper[4791]: I0218 00:56:58.432013 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-internal-tls-certs\") pod \"neutron-5ff469dfbc-w7krb\" (UID: \"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352\") " pod="openstack/neutron-5ff469dfbc-w7krb" Feb 18 00:56:58 crc kubenswrapper[4791]: I0218 00:56:58.432085 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-combined-ca-bundle\") pod \"neutron-5ff469dfbc-w7krb\" (UID: \"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352\") " pod="openstack/neutron-5ff469dfbc-w7krb" Feb 18 00:56:58 crc kubenswrapper[4791]: I0218 00:56:58.432109 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-httpd-config\") pod \"neutron-5ff469dfbc-w7krb\" (UID: \"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352\") " pod="openstack/neutron-5ff469dfbc-w7krb" Feb 18 00:56:58 crc kubenswrapper[4791]: I0218 00:56:58.432130 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tfqvp\" (UniqueName: \"kubernetes.io/projected/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-kube-api-access-tfqvp\") pod \"neutron-5ff469dfbc-w7krb\" (UID: \"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352\") " pod="openstack/neutron-5ff469dfbc-w7krb" Feb 18 00:56:58 crc kubenswrapper[4791]: I0218 00:56:58.432161 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-ovndb-tls-certs\") pod \"neutron-5ff469dfbc-w7krb\" (UID: \"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352\") " pod="openstack/neutron-5ff469dfbc-w7krb" Feb 18 00:56:58 crc kubenswrapper[4791]: I0218 00:56:58.432238 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-public-tls-certs\") pod \"neutron-5ff469dfbc-w7krb\" (UID: \"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352\") " pod="openstack/neutron-5ff469dfbc-w7krb" Feb 18 00:56:58 crc kubenswrapper[4791]: I0218 00:56:58.438733 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-public-tls-certs\") pod \"neutron-5ff469dfbc-w7krb\" (UID: \"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352\") " pod="openstack/neutron-5ff469dfbc-w7krb" Feb 18 00:56:58 crc kubenswrapper[4791]: I0218 00:56:58.442282 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-config\") pod \"neutron-5ff469dfbc-w7krb\" (UID: \"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352\") " pod="openstack/neutron-5ff469dfbc-w7krb" Feb 18 00:56:58 crc kubenswrapper[4791]: I0218 00:56:58.446654 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-internal-tls-certs\") pod \"neutron-5ff469dfbc-w7krb\" (UID: 
\"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352\") " pod="openstack/neutron-5ff469dfbc-w7krb" Feb 18 00:56:58 crc kubenswrapper[4791]: I0218 00:56:58.446873 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-combined-ca-bundle\") pod \"neutron-5ff469dfbc-w7krb\" (UID: \"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352\") " pod="openstack/neutron-5ff469dfbc-w7krb" Feb 18 00:56:58 crc kubenswrapper[4791]: I0218 00:56:58.447239 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-ovndb-tls-certs\") pod \"neutron-5ff469dfbc-w7krb\" (UID: \"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352\") " pod="openstack/neutron-5ff469dfbc-w7krb" Feb 18 00:56:58 crc kubenswrapper[4791]: I0218 00:56:58.449800 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-httpd-config\") pod \"neutron-5ff469dfbc-w7krb\" (UID: \"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352\") " pod="openstack/neutron-5ff469dfbc-w7krb" Feb 18 00:56:58 crc kubenswrapper[4791]: I0218 00:56:58.475856 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tfqvp\" (UniqueName: \"kubernetes.io/projected/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-kube-api-access-tfqvp\") pod \"neutron-5ff469dfbc-w7krb\" (UID: \"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352\") " pod="openstack/neutron-5ff469dfbc-w7krb" Feb 18 00:56:58 crc kubenswrapper[4791]: I0218 00:56:58.480962 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5ff469dfbc-w7krb" Feb 18 00:56:58 crc kubenswrapper[4791]: I0218 00:56:58.556916 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8694564946-rt9m5" event={"ID":"7c4f2d33-853b-4b26-9050-a95f8a0aacff","Type":"ContainerStarted","Data":"8abe60f3df0b33e090dc83d4165c66f72192c57426bc2db0ec4358908d91390e"} Feb 18 00:56:59 crc kubenswrapper[4791]: I0218 00:56:59.567255 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"718be5cf-bdd1-4639-9fc5-343a770f1244","Type":"ContainerStarted","Data":"38a6fa60495cbae75896050e9525f0c658396c2a526071571bf237bcb5629cfa"} Feb 18 00:57:00 crc kubenswrapper[4791]: I0218 00:57:00.802429 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Feb 18 00:57:00 crc kubenswrapper[4791]: I0218 00:57:00.802895 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Feb 18 00:57:00 crc kubenswrapper[4791]: I0218 00:57:00.809026 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Feb 18 00:57:01 crc kubenswrapper[4791]: I0218 00:57:01.588768 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"65d7529c-6130-4f99-9212-d13950d08fd5","Type":"ContainerStarted","Data":"cc15b53040307392520c6bf0afceeeae3fa7b4ede8c4b865650ef1bade4c4e37"} Feb 18 00:57:01 crc kubenswrapper[4791]: I0218 00:57:01.590578 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-lxpbs" event={"ID":"5ddd4e16-7034-4925-8bba-2320640dd8b7","Type":"ContainerStarted","Data":"b5e0350017d7281404c22de7f53157415a778f82bb05ed7754c2ad736056d7e3"} Feb 18 
00:57:01 crc kubenswrapper[4791]: I0218 00:57:01.594201 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84b966f6c9-9s7nn" event={"ID":"f76d819d-a178-42b5-bc1d-642c8684a05b","Type":"ContainerStarted","Data":"836d31d22349a128a7ff7e08ea60573c4c2aaa743d22abef5271a32bde71957a"} Feb 18 00:57:01 crc kubenswrapper[4791]: I0218 00:57:01.594421 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-84b966f6c9-9s7nn" Feb 18 00:57:01 crc kubenswrapper[4791]: I0218 00:57:01.596299 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="9d59b5e1-a833-4da4-ad0e-6315320cefbc" containerName="glance-log" containerID="cri-o://a2fb9d3f2721062df8346d68d8ee9af9d4ba452dfc9e2048ab3e38c7a86ecb4b" gracePeriod=30 Feb 18 00:57:01 crc kubenswrapper[4791]: I0218 00:57:01.596456 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="9d59b5e1-a833-4da4-ad0e-6315320cefbc" containerName="glance-httpd" containerID="cri-o://9329556c92ed5aba8950fdfd59f03322f7fe49cb2846b2d4821c4886e470bbdc" gracePeriod=30 Feb 18 00:57:01 crc kubenswrapper[4791]: I0218 00:57:01.596758 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9d59b5e1-a833-4da4-ad0e-6315320cefbc","Type":"ContainerStarted","Data":"9329556c92ed5aba8950fdfd59f03322f7fe49cb2846b2d4821c4886e470bbdc"} Feb 18 00:57:01 crc kubenswrapper[4791]: I0218 00:57:01.601566 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Feb 18 00:57:01 crc kubenswrapper[4791]: I0218 00:57:01.612402 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-lxpbs" podStartSLOduration=6.787908478 podStartE2EDuration="39.612384976s" podCreationTimestamp="2026-02-18 00:56:22 +0000 UTC" firstStartedPulling="2026-02-18 00:56:24.72852321 +0000 UTC m=+1326.296536380" lastFinishedPulling="2026-02-18 00:56:57.552999708 +0000 UTC m=+1359.121012878" observedRunningTime="2026-02-18 00:57:01.604206033 +0000 UTC m=+1363.172219203" watchObservedRunningTime="2026-02-18 00:57:01.612384976 +0000 UTC m=+1363.180398136" Feb 18 00:57:01 crc kubenswrapper[4791]: I0218 00:57:01.640331 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=28.640295231 podStartE2EDuration="28.640295231s" podCreationTimestamp="2026-02-18 00:56:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:57:01.624011666 +0000 UTC m=+1363.192024836" watchObservedRunningTime="2026-02-18 00:57:01.640295231 +0000 UTC m=+1363.208308751" Feb 18 00:57:01 crc kubenswrapper[4791]: I0218 00:57:01.670071 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=26.670053404 podStartE2EDuration="26.670053404s" podCreationTimestamp="2026-02-18 00:56:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:57:01.649138295 +0000 UTC m=+1363.217151475" watchObservedRunningTime="2026-02-18 00:57:01.670053404 +0000 UTC m=+1363.238066574" Feb 18 00:57:01 crc kubenswrapper[4791]: I0218 00:57:01.671284 4791 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-84b966f6c9-9s7nn" podStartSLOduration=7.671274431 podStartE2EDuration="7.671274431s" podCreationTimestamp="2026-02-18 00:56:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:57:01.663897143 +0000 UTC m=+1363.231910313" watchObservedRunningTime="2026-02-18 00:57:01.671274431 +0000 UTC m=+1363.239287601" Feb 18 00:57:01 crc kubenswrapper[4791]: I0218 00:57:01.830995 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5ff469dfbc-w7krb"] Feb 18 00:57:02 crc kubenswrapper[4791]: I0218 00:57:02.626145 4791 generic.go:334] "Generic (PLEG): container finished" podID="082acdfd-08b8-4986-8091-22d29ab897f3" containerID="402e56035ac323209b0d643883d293c8bf2dae4d527683ce023127f2618db865" exitCode=0 Feb 18 00:57:02 crc kubenswrapper[4791]: I0218 00:57:02.626734 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-ld9m6" event={"ID":"082acdfd-08b8-4986-8091-22d29ab897f3","Type":"ContainerDied","Data":"402e56035ac323209b0d643883d293c8bf2dae4d527683ce023127f2618db865"} Feb 18 00:57:02 crc kubenswrapper[4791]: I0218 00:57:02.635003 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5ff469dfbc-w7krb" event={"ID":"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352","Type":"ContainerStarted","Data":"a620b3185c82615b52303d4e4281a904fe9288345b93edc95bb400d64fb83874"} Feb 18 00:57:02 crc kubenswrapper[4791]: I0218 00:57:02.635048 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5ff469dfbc-w7krb" event={"ID":"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352","Type":"ContainerStarted","Data":"396bc762c002400aa7b56e18187ef4bbbf71ce620b59b82f1c78433a285534c1"} Feb 18 00:57:02 crc kubenswrapper[4791]: I0218 00:57:02.643065 4791 generic.go:334] "Generic (PLEG): container finished" podID="9d59b5e1-a833-4da4-ad0e-6315320cefbc" containerID="9329556c92ed5aba8950fdfd59f03322f7fe49cb2846b2d4821c4886e470bbdc" exitCode=0 Feb 18 00:57:02 crc kubenswrapper[4791]: I0218 00:57:02.643098 4791 generic.go:334] "Generic (PLEG): container finished" podID="9d59b5e1-a833-4da4-ad0e-6315320cefbc" containerID="a2fb9d3f2721062df8346d68d8ee9af9d4ba452dfc9e2048ab3e38c7a86ecb4b" exitCode=143 Feb 18 00:57:02 crc kubenswrapper[4791]: I0218 00:57:02.644270 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9d59b5e1-a833-4da4-ad0e-6315320cefbc","Type":"ContainerDied","Data":"9329556c92ed5aba8950fdfd59f03322f7fe49cb2846b2d4821c4886e470bbdc"} Feb 18 00:57:02 crc kubenswrapper[4791]: I0218 00:57:02.644301 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9d59b5e1-a833-4da4-ad0e-6315320cefbc","Type":"ContainerDied","Data":"a2fb9d3f2721062df8346d68d8ee9af9d4ba452dfc9e2048ab3e38c7a86ecb4b"} Feb 18 00:57:02 crc kubenswrapper[4791]: I0218 00:57:02.644312 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9d59b5e1-a833-4da4-ad0e-6315320cefbc","Type":"ContainerDied","Data":"3889b552ff289a28e5e4cc12420115972d2ee078f45b58a477b98b82f546f9ba"} Feb 18 00:57:02 crc kubenswrapper[4791]: I0218 00:57:02.644321 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3889b552ff289a28e5e4cc12420115972d2ee078f45b58a477b98b82f546f9ba" Feb 18 00:57:02 crc 
kubenswrapper[4791]: I0218 00:57:02.663723 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 18 00:57:02 crc kubenswrapper[4791]: I0218 00:57:02.755913 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d59b5e1-a833-4da4-ad0e-6315320cefbc-logs\") pod \"9d59b5e1-a833-4da4-ad0e-6315320cefbc\" (UID: \"9d59b5e1-a833-4da4-ad0e-6315320cefbc\") " Feb 18 00:57:02 crc kubenswrapper[4791]: I0218 00:57:02.756353 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9d59b5e1-a833-4da4-ad0e-6315320cefbc-httpd-run\") pod \"9d59b5e1-a833-4da4-ad0e-6315320cefbc\" (UID: \"9d59b5e1-a833-4da4-ad0e-6315320cefbc\") " Feb 18 00:57:02 crc kubenswrapper[4791]: I0218 00:57:02.756391 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d59b5e1-a833-4da4-ad0e-6315320cefbc-combined-ca-bundle\") pod \"9d59b5e1-a833-4da4-ad0e-6315320cefbc\" (UID: \"9d59b5e1-a833-4da4-ad0e-6315320cefbc\") " Feb 18 00:57:02 crc kubenswrapper[4791]: I0218 00:57:02.756542 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\") pod \"9d59b5e1-a833-4da4-ad0e-6315320cefbc\" (UID: \"9d59b5e1-a833-4da4-ad0e-6315320cefbc\") " Feb 18 00:57:02 crc kubenswrapper[4791]: I0218 00:57:02.756717 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tgrv7\" (UniqueName: \"kubernetes.io/projected/9d59b5e1-a833-4da4-ad0e-6315320cefbc-kube-api-access-tgrv7\") pod \"9d59b5e1-a833-4da4-ad0e-6315320cefbc\" (UID: \"9d59b5e1-a833-4da4-ad0e-6315320cefbc\") " Feb 18 00:57:02 crc kubenswrapper[4791]: I0218 00:57:02.756768 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d59b5e1-a833-4da4-ad0e-6315320cefbc-scripts\") pod \"9d59b5e1-a833-4da4-ad0e-6315320cefbc\" (UID: \"9d59b5e1-a833-4da4-ad0e-6315320cefbc\") " Feb 18 00:57:02 crc kubenswrapper[4791]: I0218 00:57:02.756816 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d59b5e1-a833-4da4-ad0e-6315320cefbc-config-data\") pod \"9d59b5e1-a833-4da4-ad0e-6315320cefbc\" (UID: \"9d59b5e1-a833-4da4-ad0e-6315320cefbc\") " Feb 18 00:57:02 crc kubenswrapper[4791]: I0218 00:57:02.763519 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d59b5e1-a833-4da4-ad0e-6315320cefbc-logs" (OuterVolumeSpecName: "logs") pod "9d59b5e1-a833-4da4-ad0e-6315320cefbc" (UID: "9d59b5e1-a833-4da4-ad0e-6315320cefbc"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:57:02 crc kubenswrapper[4791]: I0218 00:57:02.764293 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d59b5e1-a833-4da4-ad0e-6315320cefbc-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "9d59b5e1-a833-4da4-ad0e-6315320cefbc" (UID: "9d59b5e1-a833-4da4-ad0e-6315320cefbc"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:57:02 crc kubenswrapper[4791]: I0218 00:57:02.768879 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d59b5e1-a833-4da4-ad0e-6315320cefbc-kube-api-access-tgrv7" (OuterVolumeSpecName: "kube-api-access-tgrv7") pod "9d59b5e1-a833-4da4-ad0e-6315320cefbc" (UID: "9d59b5e1-a833-4da4-ad0e-6315320cefbc"). InnerVolumeSpecName "kube-api-access-tgrv7". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:57:02 crc kubenswrapper[4791]: I0218 00:57:02.781339 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d59b5e1-a833-4da4-ad0e-6315320cefbc-scripts" (OuterVolumeSpecName: "scripts") pod "9d59b5e1-a833-4da4-ad0e-6315320cefbc" (UID: "9d59b5e1-a833-4da4-ad0e-6315320cefbc"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:02 crc kubenswrapper[4791]: I0218 00:57:02.811692 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5" (OuterVolumeSpecName: "glance") pod "9d59b5e1-a833-4da4-ad0e-6315320cefbc" (UID: "9d59b5e1-a833-4da4-ad0e-6315320cefbc"). InnerVolumeSpecName "pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5". PluginName "kubernetes.io/csi", VolumeGidValue "" Feb 18 00:57:02 crc kubenswrapper[4791]: I0218 00:57:02.826400 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d59b5e1-a833-4da4-ad0e-6315320cefbc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9d59b5e1-a833-4da4-ad0e-6315320cefbc" (UID: "9d59b5e1-a833-4da4-ad0e-6315320cefbc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:02 crc kubenswrapper[4791]: I0218 00:57:02.856691 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d59b5e1-a833-4da4-ad0e-6315320cefbc-config-data" (OuterVolumeSpecName: "config-data") pod "9d59b5e1-a833-4da4-ad0e-6315320cefbc" (UID: "9d59b5e1-a833-4da4-ad0e-6315320cefbc"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:02 crc kubenswrapper[4791]: I0218 00:57:02.860376 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tgrv7\" (UniqueName: \"kubernetes.io/projected/9d59b5e1-a833-4da4-ad0e-6315320cefbc-kube-api-access-tgrv7\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:02 crc kubenswrapper[4791]: I0218 00:57:02.860401 4791 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d59b5e1-a833-4da4-ad0e-6315320cefbc-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:02 crc kubenswrapper[4791]: I0218 00:57:02.860411 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d59b5e1-a833-4da4-ad0e-6315320cefbc-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:02 crc kubenswrapper[4791]: I0218 00:57:02.860421 4791 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d59b5e1-a833-4da4-ad0e-6315320cefbc-logs\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:02 crc kubenswrapper[4791]: I0218 00:57:02.860429 4791 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9d59b5e1-a833-4da4-ad0e-6315320cefbc-httpd-run\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:02 crc kubenswrapper[4791]: I0218 00:57:02.860439 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d59b5e1-a833-4da4-ad0e-6315320cefbc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:02 crc kubenswrapper[4791]: I0218 00:57:02.860463 4791 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\") on node \"crc\" " Feb 18 00:57:02 crc kubenswrapper[4791]: I0218 00:57:02.957615 4791 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Feb 18 00:57:02 crc kubenswrapper[4791]: I0218 00:57:02.957762 4791 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5") on node "crc" Feb 18 00:57:02 crc kubenswrapper[4791]: I0218 00:57:02.962925 4791 reconciler_common.go:293] "Volume detached for volume \"pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:03 crc kubenswrapper[4791]: I0218 00:57:03.661671 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5ff469dfbc-w7krb" event={"ID":"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352","Type":"ContainerStarted","Data":"7430a4fec703db85e10c20156d2b511933405b2404b2435e4746ddafbfb84c78"} Feb 18 00:57:03 crc kubenswrapper[4791]: I0218 00:57:03.663672 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-5ff469dfbc-w7krb" Feb 18 00:57:03 crc kubenswrapper[4791]: I0218 00:57:03.666469 4791 generic.go:334] "Generic (PLEG): container finished" podID="69958a35-9d4e-4e70-b21b-525ffef5d9da" containerID="d1179326184c0774c035050ce21c7a60bc5d040a13893bf2844e7608b33a0205" exitCode=0 Feb 18 00:57:03 crc kubenswrapper[4791]: I0218 00:57:03.666519 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-pjx58" event={"ID":"69958a35-9d4e-4e70-b21b-525ffef5d9da","Type":"ContainerDied","Data":"d1179326184c0774c035050ce21c7a60bc5d040a13893bf2844e7608b33a0205"} Feb 18 00:57:03 crc kubenswrapper[4791]: I0218 00:57:03.666589 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 18 00:57:03 crc kubenswrapper[4791]: I0218 00:57:03.695089 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5ff469dfbc-w7krb" podStartSLOduration=5.695070497 podStartE2EDuration="5.695070497s" podCreationTimestamp="2026-02-18 00:56:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:57:03.689168865 +0000 UTC m=+1365.257182035" watchObservedRunningTime="2026-02-18 00:57:03.695070497 +0000 UTC m=+1365.263083657" Feb 18 00:57:03 crc kubenswrapper[4791]: I0218 00:57:03.720470 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 18 00:57:03 crc kubenswrapper[4791]: I0218 00:57:03.760444 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 18 00:57:03 crc kubenswrapper[4791]: I0218 00:57:03.787332 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 18 00:57:03 crc kubenswrapper[4791]: E0218 00:57:03.787851 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d59b5e1-a833-4da4-ad0e-6315320cefbc" containerName="glance-httpd" Feb 18 00:57:03 crc kubenswrapper[4791]: I0218 00:57:03.787873 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d59b5e1-a833-4da4-ad0e-6315320cefbc" containerName="glance-httpd" Feb 18 00:57:03 crc kubenswrapper[4791]: E0218 00:57:03.787917 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d59b5e1-a833-4da4-ad0e-6315320cefbc" containerName="glance-log" Feb 18 00:57:03 crc kubenswrapper[4791]: I0218 
00:57:03.787924 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d59b5e1-a833-4da4-ad0e-6315320cefbc" containerName="glance-log" Feb 18 00:57:03 crc kubenswrapper[4791]: I0218 00:57:03.788188 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d59b5e1-a833-4da4-ad0e-6315320cefbc" containerName="glance-httpd" Feb 18 00:57:03 crc kubenswrapper[4791]: I0218 00:57:03.788222 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d59b5e1-a833-4da4-ad0e-6315320cefbc" containerName="glance-log" Feb 18 00:57:03 crc kubenswrapper[4791]: I0218 00:57:03.789436 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 18 00:57:03 crc kubenswrapper[4791]: I0218 00:57:03.798211 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Feb 18 00:57:03 crc kubenswrapper[4791]: I0218 00:57:03.816866 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Feb 18 00:57:03 crc kubenswrapper[4791]: I0218 00:57:03.827982 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 18 00:57:03 crc kubenswrapper[4791]: I0218 00:57:03.923689 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/88f5c47b-706b-4d5b-9822-56c13e90a7a9-scripts\") pod \"glance-default-internal-api-0\" (UID: \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:57:03 crc kubenswrapper[4791]: I0218 00:57:03.924090 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/88f5c47b-706b-4d5b-9822-56c13e90a7a9-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:57:03 crc kubenswrapper[4791]: I0218 00:57:03.924139 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88f5c47b-706b-4d5b-9822-56c13e90a7a9-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:57:03 crc kubenswrapper[4791]: I0218 00:57:03.927934 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/88f5c47b-706b-4d5b-9822-56c13e90a7a9-logs\") pod \"glance-default-internal-api-0\" (UID: \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:57:03 crc kubenswrapper[4791]: I0218 00:57:03.928022 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88f5c47b-706b-4d5b-9822-56c13e90a7a9-config-data\") pod \"glance-default-internal-api-0\" (UID: \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:57:03 crc kubenswrapper[4791]: I0218 00:57:03.928211 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/88f5c47b-706b-4d5b-9822-56c13e90a7a9-httpd-run\") pod 
\"glance-default-internal-api-0\" (UID: \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:57:03 crc kubenswrapper[4791]: I0218 00:57:03.928299 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\") pod \"glance-default-internal-api-0\" (UID: \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:57:03 crc kubenswrapper[4791]: I0218 00:57:03.928339 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q2nsc\" (UniqueName: \"kubernetes.io/projected/88f5c47b-706b-4d5b-9822-56c13e90a7a9-kube-api-access-q2nsc\") pod \"glance-default-internal-api-0\" (UID: \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.030568 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/88f5c47b-706b-4d5b-9822-56c13e90a7a9-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.030638 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\") pod \"glance-default-internal-api-0\" (UID: \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.030678 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q2nsc\" (UniqueName: \"kubernetes.io/projected/88f5c47b-706b-4d5b-9822-56c13e90a7a9-kube-api-access-q2nsc\") pod \"glance-default-internal-api-0\" (UID: \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.030738 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/88f5c47b-706b-4d5b-9822-56c13e90a7a9-scripts\") pod \"glance-default-internal-api-0\" (UID: \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.030780 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/88f5c47b-706b-4d5b-9822-56c13e90a7a9-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.030798 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88f5c47b-706b-4d5b-9822-56c13e90a7a9-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.030817 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" 
(UniqueName: \"kubernetes.io/empty-dir/88f5c47b-706b-4d5b-9822-56c13e90a7a9-logs\") pod \"glance-default-internal-api-0\" (UID: \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.030847 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88f5c47b-706b-4d5b-9822-56c13e90a7a9-config-data\") pod \"glance-default-internal-api-0\" (UID: \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.031498 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/88f5c47b-706b-4d5b-9822-56c13e90a7a9-logs\") pod \"glance-default-internal-api-0\" (UID: \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.031864 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/88f5c47b-706b-4d5b-9822-56c13e90a7a9-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.036528 4791 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.036560 4791 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\") pod \"glance-default-internal-api-0\" (UID: \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/9790f76315acc35c8fd6eab9a9221b467468b56b96e913c660367fc7d70a609d/globalmount\"" pod="openstack/glance-default-internal-api-0" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.037418 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/88f5c47b-706b-4d5b-9822-56c13e90a7a9-scripts\") pod \"glance-default-internal-api-0\" (UID: \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.039426 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88f5c47b-706b-4d5b-9822-56c13e90a7a9-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.054412 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88f5c47b-706b-4d5b-9822-56c13e90a7a9-config-data\") pod \"glance-default-internal-api-0\" (UID: \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.058359 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/88f5c47b-706b-4d5b-9822-56c13e90a7a9-internal-tls-certs\") pod \"glance-default-internal-api-0\" 
(UID: \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.058636 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q2nsc\" (UniqueName: \"kubernetes.io/projected/88f5c47b-706b-4d5b-9822-56c13e90a7a9-kube-api-access-q2nsc\") pod \"glance-default-internal-api-0\" (UID: \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.152182 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\") pod \"glance-default-internal-api-0\" (UID: \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.155138 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-ld9m6" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.233731 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/082acdfd-08b8-4986-8091-22d29ab897f3-scripts\") pod \"082acdfd-08b8-4986-8091-22d29ab897f3\" (UID: \"082acdfd-08b8-4986-8091-22d29ab897f3\") " Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.233812 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/082acdfd-08b8-4986-8091-22d29ab897f3-config-data\") pod \"082acdfd-08b8-4986-8091-22d29ab897f3\" (UID: \"082acdfd-08b8-4986-8091-22d29ab897f3\") " Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.234064 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gpdlh\" (UniqueName: \"kubernetes.io/projected/082acdfd-08b8-4986-8091-22d29ab897f3-kube-api-access-gpdlh\") pod \"082acdfd-08b8-4986-8091-22d29ab897f3\" (UID: \"082acdfd-08b8-4986-8091-22d29ab897f3\") " Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.234221 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/082acdfd-08b8-4986-8091-22d29ab897f3-logs\") pod \"082acdfd-08b8-4986-8091-22d29ab897f3\" (UID: \"082acdfd-08b8-4986-8091-22d29ab897f3\") " Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.234476 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/082acdfd-08b8-4986-8091-22d29ab897f3-combined-ca-bundle\") pod \"082acdfd-08b8-4986-8091-22d29ab897f3\" (UID: \"082acdfd-08b8-4986-8091-22d29ab897f3\") " Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.235926 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/082acdfd-08b8-4986-8091-22d29ab897f3-logs" (OuterVolumeSpecName: "logs") pod "082acdfd-08b8-4986-8091-22d29ab897f3" (UID: "082acdfd-08b8-4986-8091-22d29ab897f3"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.237882 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/082acdfd-08b8-4986-8091-22d29ab897f3-kube-api-access-gpdlh" (OuterVolumeSpecName: "kube-api-access-gpdlh") pod "082acdfd-08b8-4986-8091-22d29ab897f3" (UID: "082acdfd-08b8-4986-8091-22d29ab897f3"). InnerVolumeSpecName "kube-api-access-gpdlh". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.247387 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/082acdfd-08b8-4986-8091-22d29ab897f3-scripts" (OuterVolumeSpecName: "scripts") pod "082acdfd-08b8-4986-8091-22d29ab897f3" (UID: "082acdfd-08b8-4986-8091-22d29ab897f3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.281882 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/082acdfd-08b8-4986-8091-22d29ab897f3-config-data" (OuterVolumeSpecName: "config-data") pod "082acdfd-08b8-4986-8091-22d29ab897f3" (UID: "082acdfd-08b8-4986-8091-22d29ab897f3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.308973 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/082acdfd-08b8-4986-8091-22d29ab897f3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "082acdfd-08b8-4986-8091-22d29ab897f3" (UID: "082acdfd-08b8-4986-8091-22d29ab897f3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.337120 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/082acdfd-08b8-4986-8091-22d29ab897f3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.337182 4791 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/082acdfd-08b8-4986-8091-22d29ab897f3-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.337195 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/082acdfd-08b8-4986-8091-22d29ab897f3-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.337207 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gpdlh\" (UniqueName: \"kubernetes.io/projected/082acdfd-08b8-4986-8091-22d29ab897f3-kube-api-access-gpdlh\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.337220 4791 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/082acdfd-08b8-4986-8091-22d29ab897f3-logs\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.406348 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.686145 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-ld9m6" event={"ID":"082acdfd-08b8-4986-8091-22d29ab897f3","Type":"ContainerDied","Data":"d0f5bf6d9bab479f6420fc59ac95a297dab72e8047d930a81370f41d0d538aa7"} Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.686517 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d0f5bf6d9bab479f6420fc59ac95a297dab72e8047d930a81370f41d0d538aa7" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.686414 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-ld9m6" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.739104 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-956c96f98-prlnm"] Feb 18 00:57:04 crc kubenswrapper[4791]: E0218 00:57:04.739830 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="082acdfd-08b8-4986-8091-22d29ab897f3" containerName="placement-db-sync" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.739849 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="082acdfd-08b8-4986-8091-22d29ab897f3" containerName="placement-db-sync" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.740105 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="082acdfd-08b8-4986-8091-22d29ab897f3" containerName="placement-db-sync" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.741420 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-956c96f98-prlnm" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.745051 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-kjnsb" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.745236 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.751297 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.751563 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.764011 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.764438 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-956c96f98-prlnm"] Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.856460 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a531661-ab4d-4689-8dd6-a1627232f871-config-data\") pod \"placement-956c96f98-prlnm\" (UID: \"0a531661-ab4d-4689-8dd6-a1627232f871\") " pod="openstack/placement-956c96f98-prlnm" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.856501 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a531661-ab4d-4689-8dd6-a1627232f871-internal-tls-certs\") pod \"placement-956c96f98-prlnm\" (UID: \"0a531661-ab4d-4689-8dd6-a1627232f871\") " pod="openstack/placement-956c96f98-prlnm" Feb 18 00:57:04 
crc kubenswrapper[4791]: I0218 00:57:04.856693 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a531661-ab4d-4689-8dd6-a1627232f871-public-tls-certs\") pod \"placement-956c96f98-prlnm\" (UID: \"0a531661-ab4d-4689-8dd6-a1627232f871\") " pod="openstack/placement-956c96f98-prlnm" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.856726 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a531661-ab4d-4689-8dd6-a1627232f871-combined-ca-bundle\") pod \"placement-956c96f98-prlnm\" (UID: \"0a531661-ab4d-4689-8dd6-a1627232f871\") " pod="openstack/placement-956c96f98-prlnm" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.856890 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wf5l2\" (UniqueName: \"kubernetes.io/projected/0a531661-ab4d-4689-8dd6-a1627232f871-kube-api-access-wf5l2\") pod \"placement-956c96f98-prlnm\" (UID: \"0a531661-ab4d-4689-8dd6-a1627232f871\") " pod="openstack/placement-956c96f98-prlnm" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.857015 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a531661-ab4d-4689-8dd6-a1627232f871-logs\") pod \"placement-956c96f98-prlnm\" (UID: \"0a531661-ab4d-4689-8dd6-a1627232f871\") " pod="openstack/placement-956c96f98-prlnm" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.857090 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a531661-ab4d-4689-8dd6-a1627232f871-scripts\") pod \"placement-956c96f98-prlnm\" (UID: \"0a531661-ab4d-4689-8dd6-a1627232f871\") " pod="openstack/placement-956c96f98-prlnm" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.962897 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wf5l2\" (UniqueName: \"kubernetes.io/projected/0a531661-ab4d-4689-8dd6-a1627232f871-kube-api-access-wf5l2\") pod \"placement-956c96f98-prlnm\" (UID: \"0a531661-ab4d-4689-8dd6-a1627232f871\") " pod="openstack/placement-956c96f98-prlnm" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.963339 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a531661-ab4d-4689-8dd6-a1627232f871-logs\") pod \"placement-956c96f98-prlnm\" (UID: \"0a531661-ab4d-4689-8dd6-a1627232f871\") " pod="openstack/placement-956c96f98-prlnm" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.963405 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a531661-ab4d-4689-8dd6-a1627232f871-scripts\") pod \"placement-956c96f98-prlnm\" (UID: \"0a531661-ab4d-4689-8dd6-a1627232f871\") " pod="openstack/placement-956c96f98-prlnm" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.963469 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a531661-ab4d-4689-8dd6-a1627232f871-config-data\") pod \"placement-956c96f98-prlnm\" (UID: \"0a531661-ab4d-4689-8dd6-a1627232f871\") " pod="openstack/placement-956c96f98-prlnm" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.963490 4791 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a531661-ab4d-4689-8dd6-a1627232f871-internal-tls-certs\") pod \"placement-956c96f98-prlnm\" (UID: \"0a531661-ab4d-4689-8dd6-a1627232f871\") " pod="openstack/placement-956c96f98-prlnm" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.963568 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a531661-ab4d-4689-8dd6-a1627232f871-public-tls-certs\") pod \"placement-956c96f98-prlnm\" (UID: \"0a531661-ab4d-4689-8dd6-a1627232f871\") " pod="openstack/placement-956c96f98-prlnm" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.963602 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a531661-ab4d-4689-8dd6-a1627232f871-combined-ca-bundle\") pod \"placement-956c96f98-prlnm\" (UID: \"0a531661-ab4d-4689-8dd6-a1627232f871\") " pod="openstack/placement-956c96f98-prlnm" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.963888 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a531661-ab4d-4689-8dd6-a1627232f871-logs\") pod \"placement-956c96f98-prlnm\" (UID: \"0a531661-ab4d-4689-8dd6-a1627232f871\") " pod="openstack/placement-956c96f98-prlnm" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.970062 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a531661-ab4d-4689-8dd6-a1627232f871-public-tls-certs\") pod \"placement-956c96f98-prlnm\" (UID: \"0a531661-ab4d-4689-8dd6-a1627232f871\") " pod="openstack/placement-956c96f98-prlnm" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.974488 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a531661-ab4d-4689-8dd6-a1627232f871-config-data\") pod \"placement-956c96f98-prlnm\" (UID: \"0a531661-ab4d-4689-8dd6-a1627232f871\") " pod="openstack/placement-956c96f98-prlnm" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.975561 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a531661-ab4d-4689-8dd6-a1627232f871-internal-tls-certs\") pod \"placement-956c96f98-prlnm\" (UID: \"0a531661-ab4d-4689-8dd6-a1627232f871\") " pod="openstack/placement-956c96f98-prlnm" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.975602 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a531661-ab4d-4689-8dd6-a1627232f871-scripts\") pod \"placement-956c96f98-prlnm\" (UID: \"0a531661-ab4d-4689-8dd6-a1627232f871\") " pod="openstack/placement-956c96f98-prlnm" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.975800 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a531661-ab4d-4689-8dd6-a1627232f871-combined-ca-bundle\") pod \"placement-956c96f98-prlnm\" (UID: \"0a531661-ab4d-4689-8dd6-a1627232f871\") " pod="openstack/placement-956c96f98-prlnm" Feb 18 00:57:04 crc kubenswrapper[4791]: I0218 00:57:04.979526 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wf5l2\" (UniqueName: 
\"kubernetes.io/projected/0a531661-ab4d-4689-8dd6-a1627232f871-kube-api-access-wf5l2\") pod \"placement-956c96f98-prlnm\" (UID: \"0a531661-ab4d-4689-8dd6-a1627232f871\") " pod="openstack/placement-956c96f98-prlnm" Feb 18 00:57:05 crc kubenswrapper[4791]: I0218 00:57:05.024971 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 18 00:57:05 crc kubenswrapper[4791]: I0218 00:57:05.074611 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d59b5e1-a833-4da4-ad0e-6315320cefbc" path="/var/lib/kubelet/pods/9d59b5e1-a833-4da4-ad0e-6315320cefbc/volumes" Feb 18 00:57:05 crc kubenswrapper[4791]: I0218 00:57:05.084242 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-956c96f98-prlnm" Feb 18 00:57:05 crc kubenswrapper[4791]: I0218 00:57:05.313291 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-84b966f6c9-9s7nn" Feb 18 00:57:05 crc kubenswrapper[4791]: I0218 00:57:05.409282 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8b5c85b87-mfz79"] Feb 18 00:57:05 crc kubenswrapper[4791]: I0218 00:57:05.409705 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8b5c85b87-mfz79" podUID="e359378c-e000-4599-8f4d-e3169c37a22e" containerName="dnsmasq-dns" containerID="cri-o://2014c27c4ab964764f2316f2379b6cd8124bba1b1c25ac92580a34f5c63d1cfd" gracePeriod=10 Feb 18 00:57:05 crc kubenswrapper[4791]: I0218 00:57:05.527499 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Feb 18 00:57:05 crc kubenswrapper[4791]: I0218 00:57:05.527815 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Feb 18 00:57:05 crc kubenswrapper[4791]: I0218 00:57:05.527827 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Feb 18 00:57:05 crc kubenswrapper[4791]: I0218 00:57:05.527838 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Feb 18 00:57:05 crc kubenswrapper[4791]: I0218 00:57:05.578565 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Feb 18 00:57:05 crc kubenswrapper[4791]: I0218 00:57:05.591889 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Feb 18 00:57:05 crc kubenswrapper[4791]: I0218 00:57:05.751493 4791 generic.go:334] "Generic (PLEG): container finished" podID="e359378c-e000-4599-8f4d-e3169c37a22e" containerID="2014c27c4ab964764f2316f2379b6cd8124bba1b1c25ac92580a34f5c63d1cfd" exitCode=0 Feb 18 00:57:05 crc kubenswrapper[4791]: I0218 00:57:05.751581 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b5c85b87-mfz79" event={"ID":"e359378c-e000-4599-8f4d-e3169c37a22e","Type":"ContainerDied","Data":"2014c27c4ab964764f2316f2379b6cd8124bba1b1c25ac92580a34f5c63d1cfd"} Feb 18 00:57:05 crc kubenswrapper[4791]: I0218 00:57:05.771994 4791 generic.go:334] "Generic (PLEG): container finished" podID="5ddd4e16-7034-4925-8bba-2320640dd8b7" containerID="b5e0350017d7281404c22de7f53157415a778f82bb05ed7754c2ad736056d7e3" exitCode=0 Feb 18 00:57:05 crc kubenswrapper[4791]: I0218 00:57:05.772923 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/barbican-db-sync-lxpbs" event={"ID":"5ddd4e16-7034-4925-8bba-2320640dd8b7","Type":"ContainerDied","Data":"b5e0350017d7281404c22de7f53157415a778f82bb05ed7754c2ad736056d7e3"} Feb 18 00:57:08 crc kubenswrapper[4791]: I0218 00:57:08.887734 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-8b5c85b87-mfz79" podUID="e359378c-e000-4599-8f4d-e3169c37a22e" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.188:5353: connect: connection refused" Feb 18 00:57:08 crc kubenswrapper[4791]: I0218 00:57:08.989627 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Feb 18 00:57:08 crc kubenswrapper[4791]: I0218 00:57:08.989731 4791 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Feb 18 00:57:09 crc kubenswrapper[4791]: I0218 00:57:09.036276 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Feb 18 00:57:09 crc kubenswrapper[4791]: W0218 00:57:09.573437 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod88f5c47b_706b_4d5b_9822_56c13e90a7a9.slice/crio-48446f99549ad5f7da62ca57e2e129f93b731f12ecae9599ac0df2a974fb2a15 WatchSource:0}: Error finding container 48446f99549ad5f7da62ca57e2e129f93b731f12ecae9599ac0df2a974fb2a15: Status 404 returned error can't find the container with id 48446f99549ad5f7da62ca57e2e129f93b731f12ecae9599ac0df2a974fb2a15 Feb 18 00:57:09 crc kubenswrapper[4791]: I0218 00:57:09.803538 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-pjx58" Feb 18 00:57:09 crc kubenswrapper[4791]: I0218 00:57:09.804396 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-lxpbs" Feb 18 00:57:09 crc kubenswrapper[4791]: I0218 00:57:09.866415 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-pjx58" event={"ID":"69958a35-9d4e-4e70-b21b-525ffef5d9da","Type":"ContainerDied","Data":"7921f3424b7c06552782a6fa857761505524e5ef046f5f6e9d2c12d8ad2c9bbe"} Feb 18 00:57:09 crc kubenswrapper[4791]: I0218 00:57:09.866587 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7921f3424b7c06552782a6fa857761505524e5ef046f5f6e9d2c12d8ad2c9bbe" Feb 18 00:57:09 crc kubenswrapper[4791]: I0218 00:57:09.866663 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-pjx58" Feb 18 00:57:09 crc kubenswrapper[4791]: I0218 00:57:09.874780 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"88f5c47b-706b-4d5b-9822-56c13e90a7a9","Type":"ContainerStarted","Data":"48446f99549ad5f7da62ca57e2e129f93b731f12ecae9599ac0df2a974fb2a15"} Feb 18 00:57:09 crc kubenswrapper[4791]: I0218 00:57:09.875545 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/69958a35-9d4e-4e70-b21b-525ffef5d9da-credential-keys\") pod \"69958a35-9d4e-4e70-b21b-525ffef5d9da\" (UID: \"69958a35-9d4e-4e70-b21b-525ffef5d9da\") " Feb 18 00:57:09 crc kubenswrapper[4791]: I0218 00:57:09.875577 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69958a35-9d4e-4e70-b21b-525ffef5d9da-config-data\") pod \"69958a35-9d4e-4e70-b21b-525ffef5d9da\" (UID: \"69958a35-9d4e-4e70-b21b-525ffef5d9da\") " Feb 18 00:57:09 crc kubenswrapper[4791]: I0218 00:57:09.875641 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xmg9k\" (UniqueName: \"kubernetes.io/projected/69958a35-9d4e-4e70-b21b-525ffef5d9da-kube-api-access-xmg9k\") pod \"69958a35-9d4e-4e70-b21b-525ffef5d9da\" (UID: \"69958a35-9d4e-4e70-b21b-525ffef5d9da\") " Feb 18 00:57:09 crc kubenswrapper[4791]: I0218 00:57:09.875700 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ddd4e16-7034-4925-8bba-2320640dd8b7-combined-ca-bundle\") pod \"5ddd4e16-7034-4925-8bba-2320640dd8b7\" (UID: \"5ddd4e16-7034-4925-8bba-2320640dd8b7\") " Feb 18 00:57:09 crc kubenswrapper[4791]: I0218 00:57:09.899245 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69958a35-9d4e-4e70-b21b-525ffef5d9da-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "69958a35-9d4e-4e70-b21b-525ffef5d9da" (UID: "69958a35-9d4e-4e70-b21b-525ffef5d9da"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:09 crc kubenswrapper[4791]: I0218 00:57:09.902283 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-lxpbs" event={"ID":"5ddd4e16-7034-4925-8bba-2320640dd8b7","Type":"ContainerDied","Data":"02621d95b1e2f1ded23ba873dd8279bd620bfbd6eb3aa64a75a83f07b6b94eee"} Feb 18 00:57:09 crc kubenswrapper[4791]: I0218 00:57:09.902330 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="02621d95b1e2f1ded23ba873dd8279bd620bfbd6eb3aa64a75a83f07b6b94eee" Feb 18 00:57:09 crc kubenswrapper[4791]: I0218 00:57:09.905629 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-lxpbs" Feb 18 00:57:09 crc kubenswrapper[4791]: I0218 00:57:09.910612 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69958a35-9d4e-4e70-b21b-525ffef5d9da-kube-api-access-xmg9k" (OuterVolumeSpecName: "kube-api-access-xmg9k") pod "69958a35-9d4e-4e70-b21b-525ffef5d9da" (UID: "69958a35-9d4e-4e70-b21b-525ffef5d9da"). InnerVolumeSpecName "kube-api-access-xmg9k". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:57:09 crc kubenswrapper[4791]: I0218 00:57:09.952957 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ddd4e16-7034-4925-8bba-2320640dd8b7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5ddd4e16-7034-4925-8bba-2320640dd8b7" (UID: "5ddd4e16-7034-4925-8bba-2320640dd8b7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:09 crc kubenswrapper[4791]: I0218 00:57:09.982319 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69958a35-9d4e-4e70-b21b-525ffef5d9da-config-data" (OuterVolumeSpecName: "config-data") pod "69958a35-9d4e-4e70-b21b-525ffef5d9da" (UID: "69958a35-9d4e-4e70-b21b-525ffef5d9da"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:09 crc kubenswrapper[4791]: I0218 00:57:09.991529 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/69958a35-9d4e-4e70-b21b-525ffef5d9da-fernet-keys\") pod \"69958a35-9d4e-4e70-b21b-525ffef5d9da\" (UID: \"69958a35-9d4e-4e70-b21b-525ffef5d9da\") " Feb 18 00:57:09 crc kubenswrapper[4791]: I0218 00:57:09.991605 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mwp5g\" (UniqueName: \"kubernetes.io/projected/5ddd4e16-7034-4925-8bba-2320640dd8b7-kube-api-access-mwp5g\") pod \"5ddd4e16-7034-4925-8bba-2320640dd8b7\" (UID: \"5ddd4e16-7034-4925-8bba-2320640dd8b7\") " Feb 18 00:57:09 crc kubenswrapper[4791]: I0218 00:57:09.991629 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/69958a35-9d4e-4e70-b21b-525ffef5d9da-scripts\") pod \"69958a35-9d4e-4e70-b21b-525ffef5d9da\" (UID: \"69958a35-9d4e-4e70-b21b-525ffef5d9da\") " Feb 18 00:57:09 crc kubenswrapper[4791]: I0218 00:57:09.991996 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69958a35-9d4e-4e70-b21b-525ffef5d9da-combined-ca-bundle\") pod \"69958a35-9d4e-4e70-b21b-525ffef5d9da\" (UID: \"69958a35-9d4e-4e70-b21b-525ffef5d9da\") " Feb 18 00:57:09 crc kubenswrapper[4791]: I0218 00:57:09.992024 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/5ddd4e16-7034-4925-8bba-2320640dd8b7-db-sync-config-data\") pod \"5ddd4e16-7034-4925-8bba-2320640dd8b7\" (UID: \"5ddd4e16-7034-4925-8bba-2320640dd8b7\") " Feb 18 00:57:09 crc kubenswrapper[4791]: I0218 00:57:09.998267 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69958a35-9d4e-4e70-b21b-525ffef5d9da-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "69958a35-9d4e-4e70-b21b-525ffef5d9da" (UID: "69958a35-9d4e-4e70-b21b-525ffef5d9da"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:09 crc kubenswrapper[4791]: I0218 00:57:09.998593 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69958a35-9d4e-4e70-b21b-525ffef5d9da-scripts" (OuterVolumeSpecName: "scripts") pod "69958a35-9d4e-4e70-b21b-525ffef5d9da" (UID: "69958a35-9d4e-4e70-b21b-525ffef5d9da"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.003699 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ddd4e16-7034-4925-8bba-2320640dd8b7-kube-api-access-mwp5g" (OuterVolumeSpecName: "kube-api-access-mwp5g") pod "5ddd4e16-7034-4925-8bba-2320640dd8b7" (UID: "5ddd4e16-7034-4925-8bba-2320640dd8b7"). InnerVolumeSpecName "kube-api-access-mwp5g". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.007260 4791 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/69958a35-9d4e-4e70-b21b-525ffef5d9da-fernet-keys\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.007290 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mwp5g\" (UniqueName: \"kubernetes.io/projected/5ddd4e16-7034-4925-8bba-2320640dd8b7-kube-api-access-mwp5g\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.007301 4791 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/69958a35-9d4e-4e70-b21b-525ffef5d9da-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.007309 4791 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/69958a35-9d4e-4e70-b21b-525ffef5d9da-credential-keys\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.007413 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69958a35-9d4e-4e70-b21b-525ffef5d9da-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.007423 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xmg9k\" (UniqueName: \"kubernetes.io/projected/69958a35-9d4e-4e70-b21b-525ffef5d9da-kube-api-access-xmg9k\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.007431 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ddd4e16-7034-4925-8bba-2320640dd8b7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.008261 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ddd4e16-7034-4925-8bba-2320640dd8b7-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "5ddd4e16-7034-4925-8bba-2320640dd8b7" (UID: "5ddd4e16-7034-4925-8bba-2320640dd8b7"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.037561 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69958a35-9d4e-4e70-b21b-525ffef5d9da-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "69958a35-9d4e-4e70-b21b-525ffef5d9da" (UID: "69958a35-9d4e-4e70-b21b-525ffef5d9da"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.078001 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8b5c85b87-mfz79" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.108923 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e359378c-e000-4599-8f4d-e3169c37a22e-ovsdbserver-nb\") pod \"e359378c-e000-4599-8f4d-e3169c37a22e\" (UID: \"e359378c-e000-4599-8f4d-e3169c37a22e\") " Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.109089 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e359378c-e000-4599-8f4d-e3169c37a22e-config\") pod \"e359378c-e000-4599-8f4d-e3169c37a22e\" (UID: \"e359378c-e000-4599-8f4d-e3169c37a22e\") " Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.109155 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e359378c-e000-4599-8f4d-e3169c37a22e-dns-swift-storage-0\") pod \"e359378c-e000-4599-8f4d-e3169c37a22e\" (UID: \"e359378c-e000-4599-8f4d-e3169c37a22e\") " Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.109197 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e359378c-e000-4599-8f4d-e3169c37a22e-ovsdbserver-sb\") pod \"e359378c-e000-4599-8f4d-e3169c37a22e\" (UID: \"e359378c-e000-4599-8f4d-e3169c37a22e\") " Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.109304 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kpv8p\" (UniqueName: \"kubernetes.io/projected/e359378c-e000-4599-8f4d-e3169c37a22e-kube-api-access-kpv8p\") pod \"e359378c-e000-4599-8f4d-e3169c37a22e\" (UID: \"e359378c-e000-4599-8f4d-e3169c37a22e\") " Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.109379 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e359378c-e000-4599-8f4d-e3169c37a22e-dns-svc\") pod \"e359378c-e000-4599-8f4d-e3169c37a22e\" (UID: \"e359378c-e000-4599-8f4d-e3169c37a22e\") " Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.109991 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69958a35-9d4e-4e70-b21b-525ffef5d9da-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.110005 4791 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/5ddd4e16-7034-4925-8bba-2320640dd8b7-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.135600 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e359378c-e000-4599-8f4d-e3169c37a22e-kube-api-access-kpv8p" (OuterVolumeSpecName: "kube-api-access-kpv8p") pod "e359378c-e000-4599-8f4d-e3169c37a22e" (UID: "e359378c-e000-4599-8f4d-e3169c37a22e"). InnerVolumeSpecName "kube-api-access-kpv8p". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.211962 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kpv8p\" (UniqueName: \"kubernetes.io/projected/e359378c-e000-4599-8f4d-e3169c37a22e-kube-api-access-kpv8p\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.347444 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-956c96f98-prlnm"] Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.348805 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e359378c-e000-4599-8f4d-e3169c37a22e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e359378c-e000-4599-8f4d-e3169c37a22e" (UID: "e359378c-e000-4599-8f4d-e3169c37a22e"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.420677 4791 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e359378c-e000-4599-8f4d-e3169c37a22e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.468391 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e359378c-e000-4599-8f4d-e3169c37a22e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e359378c-e000-4599-8f4d-e3169c37a22e" (UID: "e359378c-e000-4599-8f4d-e3169c37a22e"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.473541 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e359378c-e000-4599-8f4d-e3169c37a22e-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "e359378c-e000-4599-8f4d-e3169c37a22e" (UID: "e359378c-e000-4599-8f4d-e3169c37a22e"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.474733 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e359378c-e000-4599-8f4d-e3169c37a22e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e359378c-e000-4599-8f4d-e3169c37a22e" (UID: "e359378c-e000-4599-8f4d-e3169c37a22e"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.480541 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e359378c-e000-4599-8f4d-e3169c37a22e-config" (OuterVolumeSpecName: "config") pod "e359378c-e000-4599-8f4d-e3169c37a22e" (UID: "e359378c-e000-4599-8f4d-e3169c37a22e"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.523281 4791 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e359378c-e000-4599-8f4d-e3169c37a22e-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.523316 4791 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e359378c-e000-4599-8f4d-e3169c37a22e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.523329 4791 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e359378c-e000-4599-8f4d-e3169c37a22e-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.523342 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e359378c-e000-4599-8f4d-e3169c37a22e-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.934595 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-85bf744856-fnxxz"] Feb 18 00:57:10 crc kubenswrapper[4791]: E0218 00:57:10.941189 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e359378c-e000-4599-8f4d-e3169c37a22e" containerName="init" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.941220 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="e359378c-e000-4599-8f4d-e3169c37a22e" containerName="init" Feb 18 00:57:10 crc kubenswrapper[4791]: E0218 00:57:10.941263 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e359378c-e000-4599-8f4d-e3169c37a22e" containerName="dnsmasq-dns" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.941270 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="e359378c-e000-4599-8f4d-e3169c37a22e" containerName="dnsmasq-dns" Feb 18 00:57:10 crc kubenswrapper[4791]: E0218 00:57:10.941281 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69958a35-9d4e-4e70-b21b-525ffef5d9da" containerName="keystone-bootstrap" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.941287 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="69958a35-9d4e-4e70-b21b-525ffef5d9da" containerName="keystone-bootstrap" Feb 18 00:57:10 crc kubenswrapper[4791]: E0218 00:57:10.941301 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ddd4e16-7034-4925-8bba-2320640dd8b7" containerName="barbican-db-sync" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.941308 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ddd4e16-7034-4925-8bba-2320640dd8b7" containerName="barbican-db-sync" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.941574 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="e359378c-e000-4599-8f4d-e3169c37a22e" containerName="dnsmasq-dns" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.941595 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="69958a35-9d4e-4e70-b21b-525ffef5d9da" containerName="keystone-bootstrap" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.941608 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ddd4e16-7034-4925-8bba-2320640dd8b7" containerName="barbican-db-sync" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.942448 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-85bf744856-fnxxz" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.944689 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.945053 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.945186 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-49g64" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.945298 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.945438 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.945546 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.964548 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-85bf744856-fnxxz"] Feb 18 00:57:10 crc kubenswrapper[4791]: I0218 00:57:10.994823 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-g5ccz" event={"ID":"4f39e5ec-0b51-4c0e-9a95-95c3e69163b8","Type":"ContainerStarted","Data":"2ad408151a4680c068a341aaaf8055b47f2bfe9f3d2495c19353d4be7a9abdc1"} Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.009400 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-956c96f98-prlnm" event={"ID":"0a531661-ab4d-4689-8dd6-a1627232f871","Type":"ContainerStarted","Data":"aba8ab6d89e6ccce846772572bb70879600f867434c61a8143a38ae0f130b5f2"} Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.009443 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-956c96f98-prlnm" event={"ID":"0a531661-ab4d-4689-8dd6-a1627232f871","Type":"ContainerStarted","Data":"6bb7b85fa984f0e83badbe2cb98f555dd541551bc3359960fd440a4959689036"} Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.012876 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"65d7529c-6130-4f99-9212-d13950d08fd5","Type":"ContainerStarted","Data":"aa476e5d026dcf8aac531d64b46fe9baa0c3e19528ba4c0aaf44c74767c825ee"} Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.021053 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-sync-g5ccz" podStartSLOduration=4.257840023 podStartE2EDuration="49.021036314s" podCreationTimestamp="2026-02-18 00:56:22 +0000 UTC" firstStartedPulling="2026-02-18 00:56:25.064044508 +0000 UTC m=+1326.632057678" lastFinishedPulling="2026-02-18 00:57:09.827240799 +0000 UTC m=+1371.395253969" observedRunningTime="2026-02-18 00:57:11.02057747 +0000 UTC m=+1372.588590640" watchObservedRunningTime="2026-02-18 00:57:11.021036314 +0000 UTC m=+1372.589049474" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.021539 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b5c85b87-mfz79" event={"ID":"e359378c-e000-4599-8f4d-e3169c37a22e","Type":"ContainerDied","Data":"12b580499651f5c577fca2e4341d8e7d8e7b3c42627bce0c62974a8faf4c6861"} Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.021586 4791 scope.go:117] "RemoveContainer" 
containerID="2014c27c4ab964764f2316f2379b6cd8124bba1b1c25ac92580a34f5c63d1cfd" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.021749 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8b5c85b87-mfz79" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.033385 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ecce6854-dfe6-4480-9248-190d2eacff79-public-tls-certs\") pod \"keystone-85bf744856-fnxxz\" (UID: \"ecce6854-dfe6-4480-9248-190d2eacff79\") " pod="openstack/keystone-85bf744856-fnxxz" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.033453 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ecce6854-dfe6-4480-9248-190d2eacff79-fernet-keys\") pod \"keystone-85bf744856-fnxxz\" (UID: \"ecce6854-dfe6-4480-9248-190d2eacff79\") " pod="openstack/keystone-85bf744856-fnxxz" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.033486 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecce6854-dfe6-4480-9248-190d2eacff79-combined-ca-bundle\") pod \"keystone-85bf744856-fnxxz\" (UID: \"ecce6854-dfe6-4480-9248-190d2eacff79\") " pod="openstack/keystone-85bf744856-fnxxz" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.033521 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ecce6854-dfe6-4480-9248-190d2eacff79-credential-keys\") pod \"keystone-85bf744856-fnxxz\" (UID: \"ecce6854-dfe6-4480-9248-190d2eacff79\") " pod="openstack/keystone-85bf744856-fnxxz" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.033552 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ecce6854-dfe6-4480-9248-190d2eacff79-scripts\") pod \"keystone-85bf744856-fnxxz\" (UID: \"ecce6854-dfe6-4480-9248-190d2eacff79\") " pod="openstack/keystone-85bf744856-fnxxz" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.033644 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-44b4j\" (UniqueName: \"kubernetes.io/projected/ecce6854-dfe6-4480-9248-190d2eacff79-kube-api-access-44b4j\") pod \"keystone-85bf744856-fnxxz\" (UID: \"ecce6854-dfe6-4480-9248-190d2eacff79\") " pod="openstack/keystone-85bf744856-fnxxz" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.033670 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ecce6854-dfe6-4480-9248-190d2eacff79-config-data\") pod \"keystone-85bf744856-fnxxz\" (UID: \"ecce6854-dfe6-4480-9248-190d2eacff79\") " pod="openstack/keystone-85bf744856-fnxxz" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.033736 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ecce6854-dfe6-4480-9248-190d2eacff79-internal-tls-certs\") pod \"keystone-85bf744856-fnxxz\" (UID: \"ecce6854-dfe6-4480-9248-190d2eacff79\") " pod="openstack/keystone-85bf744856-fnxxz" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.047364 4791 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"88f5c47b-706b-4d5b-9822-56c13e90a7a9","Type":"ContainerStarted","Data":"582a216500d9a6b639bae64411291d499bbc933af54e6099e2a8928ff963d396"} Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.061990 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-k7jrj" event={"ID":"e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0","Type":"ContainerStarted","Data":"b3da85a3e4cce9809241c52b63808dc7059fd6a7d9d963b4eb4608fc4c3e1399"} Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.095543 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-k7jrj" podStartSLOduration=3.519620735 podStartE2EDuration="49.095523602s" podCreationTimestamp="2026-02-18 00:56:22 +0000 UTC" firstStartedPulling="2026-02-18 00:56:24.25512219 +0000 UTC m=+1325.823135360" lastFinishedPulling="2026-02-18 00:57:09.831025057 +0000 UTC m=+1371.399038227" observedRunningTime="2026-02-18 00:57:11.087714441 +0000 UTC m=+1372.655727611" watchObservedRunningTime="2026-02-18 00:57:11.095523602 +0000 UTC m=+1372.663536772" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.143117 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ecce6854-dfe6-4480-9248-190d2eacff79-internal-tls-certs\") pod \"keystone-85bf744856-fnxxz\" (UID: \"ecce6854-dfe6-4480-9248-190d2eacff79\") " pod="openstack/keystone-85bf744856-fnxxz" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.143510 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ecce6854-dfe6-4480-9248-190d2eacff79-public-tls-certs\") pod \"keystone-85bf744856-fnxxz\" (UID: \"ecce6854-dfe6-4480-9248-190d2eacff79\") " pod="openstack/keystone-85bf744856-fnxxz" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.143606 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ecce6854-dfe6-4480-9248-190d2eacff79-fernet-keys\") pod \"keystone-85bf744856-fnxxz\" (UID: \"ecce6854-dfe6-4480-9248-190d2eacff79\") " pod="openstack/keystone-85bf744856-fnxxz" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.143659 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecce6854-dfe6-4480-9248-190d2eacff79-combined-ca-bundle\") pod \"keystone-85bf744856-fnxxz\" (UID: \"ecce6854-dfe6-4480-9248-190d2eacff79\") " pod="openstack/keystone-85bf744856-fnxxz" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.143701 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ecce6854-dfe6-4480-9248-190d2eacff79-credential-keys\") pod \"keystone-85bf744856-fnxxz\" (UID: \"ecce6854-dfe6-4480-9248-190d2eacff79\") " pod="openstack/keystone-85bf744856-fnxxz" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.143753 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ecce6854-dfe6-4480-9248-190d2eacff79-scripts\") pod \"keystone-85bf744856-fnxxz\" (UID: \"ecce6854-dfe6-4480-9248-190d2eacff79\") " pod="openstack/keystone-85bf744856-fnxxz" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.143840 4791 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-44b4j\" (UniqueName: \"kubernetes.io/projected/ecce6854-dfe6-4480-9248-190d2eacff79-kube-api-access-44b4j\") pod \"keystone-85bf744856-fnxxz\" (UID: \"ecce6854-dfe6-4480-9248-190d2eacff79\") " pod="openstack/keystone-85bf744856-fnxxz" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.143865 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ecce6854-dfe6-4480-9248-190d2eacff79-config-data\") pod \"keystone-85bf744856-fnxxz\" (UID: \"ecce6854-dfe6-4480-9248-190d2eacff79\") " pod="openstack/keystone-85bf744856-fnxxz" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.159315 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ecce6854-dfe6-4480-9248-190d2eacff79-scripts\") pod \"keystone-85bf744856-fnxxz\" (UID: \"ecce6854-dfe6-4480-9248-190d2eacff79\") " pod="openstack/keystone-85bf744856-fnxxz" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.159971 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ecce6854-dfe6-4480-9248-190d2eacff79-credential-keys\") pod \"keystone-85bf744856-fnxxz\" (UID: \"ecce6854-dfe6-4480-9248-190d2eacff79\") " pod="openstack/keystone-85bf744856-fnxxz" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.160306 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecce6854-dfe6-4480-9248-190d2eacff79-combined-ca-bundle\") pod \"keystone-85bf744856-fnxxz\" (UID: \"ecce6854-dfe6-4480-9248-190d2eacff79\") " pod="openstack/keystone-85bf744856-fnxxz" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.164039 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ecce6854-dfe6-4480-9248-190d2eacff79-config-data\") pod \"keystone-85bf744856-fnxxz\" (UID: \"ecce6854-dfe6-4480-9248-190d2eacff79\") " pod="openstack/keystone-85bf744856-fnxxz" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.164783 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ecce6854-dfe6-4480-9248-190d2eacff79-public-tls-certs\") pod \"keystone-85bf744856-fnxxz\" (UID: \"ecce6854-dfe6-4480-9248-190d2eacff79\") " pod="openstack/keystone-85bf744856-fnxxz" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.165565 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ecce6854-dfe6-4480-9248-190d2eacff79-fernet-keys\") pod \"keystone-85bf744856-fnxxz\" (UID: \"ecce6854-dfe6-4480-9248-190d2eacff79\") " pod="openstack/keystone-85bf744856-fnxxz" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.180061 4791 scope.go:117] "RemoveContainer" containerID="1338edf2ac576ae9a7f8d612b65496f573822b69899fdddb2d71e5898952f470" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.192767 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-44b4j\" (UniqueName: \"kubernetes.io/projected/ecce6854-dfe6-4480-9248-190d2eacff79-kube-api-access-44b4j\") pod \"keystone-85bf744856-fnxxz\" (UID: \"ecce6854-dfe6-4480-9248-190d2eacff79\") " pod="openstack/keystone-85bf744856-fnxxz" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.193615 4791 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ecce6854-dfe6-4480-9248-190d2eacff79-internal-tls-certs\") pod \"keystone-85bf744856-fnxxz\" (UID: \"ecce6854-dfe6-4480-9248-190d2eacff79\") " pod="openstack/keystone-85bf744856-fnxxz" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.238080 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8b5c85b87-mfz79"] Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.274745 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-85bf744856-fnxxz" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.309205 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8b5c85b87-mfz79"] Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.346750 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-54d4bd8c58-swbp6"] Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.348674 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-54d4bd8c58-swbp6" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.352579 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.352846 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-fqnht" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.358649 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.364734 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20fc7e21-d219-4f68-96f6-5e823c16fde4-config-data\") pod \"barbican-keystone-listener-54d4bd8c58-swbp6\" (UID: \"20fc7e21-d219-4f68-96f6-5e823c16fde4\") " pod="openstack/barbican-keystone-listener-54d4bd8c58-swbp6" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.364792 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/20fc7e21-d219-4f68-96f6-5e823c16fde4-config-data-custom\") pod \"barbican-keystone-listener-54d4bd8c58-swbp6\" (UID: \"20fc7e21-d219-4f68-96f6-5e823c16fde4\") " pod="openstack/barbican-keystone-listener-54d4bd8c58-swbp6" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.364831 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/20fc7e21-d219-4f68-96f6-5e823c16fde4-logs\") pod \"barbican-keystone-listener-54d4bd8c58-swbp6\" (UID: \"20fc7e21-d219-4f68-96f6-5e823c16fde4\") " pod="openstack/barbican-keystone-listener-54d4bd8c58-swbp6" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.364863 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hb59\" (UniqueName: \"kubernetes.io/projected/20fc7e21-d219-4f68-96f6-5e823c16fde4-kube-api-access-5hb59\") pod \"barbican-keystone-listener-54d4bd8c58-swbp6\" (UID: \"20fc7e21-d219-4f68-96f6-5e823c16fde4\") " pod="openstack/barbican-keystone-listener-54d4bd8c58-swbp6" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.364930 4791 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20fc7e21-d219-4f68-96f6-5e823c16fde4-combined-ca-bundle\") pod \"barbican-keystone-listener-54d4bd8c58-swbp6\" (UID: \"20fc7e21-d219-4f68-96f6-5e823c16fde4\") " pod="openstack/barbican-keystone-listener-54d4bd8c58-swbp6" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.405781 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-57447cd8ff-krgft"] Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.407524 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-57447cd8ff-krgft" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.409271 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.428894 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-54d4bd8c58-swbp6"] Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.448331 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-57447cd8ff-krgft"] Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.470654 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/20fc7e21-d219-4f68-96f6-5e823c16fde4-logs\") pod \"barbican-keystone-listener-54d4bd8c58-swbp6\" (UID: \"20fc7e21-d219-4f68-96f6-5e823c16fde4\") " pod="openstack/barbican-keystone-listener-54d4bd8c58-swbp6" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.470768 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5hb59\" (UniqueName: \"kubernetes.io/projected/20fc7e21-d219-4f68-96f6-5e823c16fde4-kube-api-access-5hb59\") pod \"barbican-keystone-listener-54d4bd8c58-swbp6\" (UID: \"20fc7e21-d219-4f68-96f6-5e823c16fde4\") " pod="openstack/barbican-keystone-listener-54d4bd8c58-swbp6" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.470796 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f500f20d-afc6-4646-8ed4-59b6897414b3-combined-ca-bundle\") pod \"barbican-worker-57447cd8ff-krgft\" (UID: \"f500f20d-afc6-4646-8ed4-59b6897414b3\") " pod="openstack/barbican-worker-57447cd8ff-krgft" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.470864 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f500f20d-afc6-4646-8ed4-59b6897414b3-config-data\") pod \"barbican-worker-57447cd8ff-krgft\" (UID: \"f500f20d-afc6-4646-8ed4-59b6897414b3\") " pod="openstack/barbican-worker-57447cd8ff-krgft" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.470960 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f500f20d-afc6-4646-8ed4-59b6897414b3-config-data-custom\") pod \"barbican-worker-57447cd8ff-krgft\" (UID: \"f500f20d-afc6-4646-8ed4-59b6897414b3\") " pod="openstack/barbican-worker-57447cd8ff-krgft" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.470989 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h5l4n\" (UniqueName: 
\"kubernetes.io/projected/f500f20d-afc6-4646-8ed4-59b6897414b3-kube-api-access-h5l4n\") pod \"barbican-worker-57447cd8ff-krgft\" (UID: \"f500f20d-afc6-4646-8ed4-59b6897414b3\") " pod="openstack/barbican-worker-57447cd8ff-krgft" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.471035 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f500f20d-afc6-4646-8ed4-59b6897414b3-logs\") pod \"barbican-worker-57447cd8ff-krgft\" (UID: \"f500f20d-afc6-4646-8ed4-59b6897414b3\") " pod="openstack/barbican-worker-57447cd8ff-krgft" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.471067 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20fc7e21-d219-4f68-96f6-5e823c16fde4-combined-ca-bundle\") pod \"barbican-keystone-listener-54d4bd8c58-swbp6\" (UID: \"20fc7e21-d219-4f68-96f6-5e823c16fde4\") " pod="openstack/barbican-keystone-listener-54d4bd8c58-swbp6" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.471305 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20fc7e21-d219-4f68-96f6-5e823c16fde4-config-data\") pod \"barbican-keystone-listener-54d4bd8c58-swbp6\" (UID: \"20fc7e21-d219-4f68-96f6-5e823c16fde4\") " pod="openstack/barbican-keystone-listener-54d4bd8c58-swbp6" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.471391 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/20fc7e21-d219-4f68-96f6-5e823c16fde4-config-data-custom\") pod \"barbican-keystone-listener-54d4bd8c58-swbp6\" (UID: \"20fc7e21-d219-4f68-96f6-5e823c16fde4\") " pod="openstack/barbican-keystone-listener-54d4bd8c58-swbp6" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.473965 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/20fc7e21-d219-4f68-96f6-5e823c16fde4-logs\") pod \"barbican-keystone-listener-54d4bd8c58-swbp6\" (UID: \"20fc7e21-d219-4f68-96f6-5e823c16fde4\") " pod="openstack/barbican-keystone-listener-54d4bd8c58-swbp6" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.486504 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-cn78k"] Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.489925 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-75c8ddd69c-cn78k" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.502709 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20fc7e21-d219-4f68-96f6-5e823c16fde4-combined-ca-bundle\") pod \"barbican-keystone-listener-54d4bd8c58-swbp6\" (UID: \"20fc7e21-d219-4f68-96f6-5e823c16fde4\") " pod="openstack/barbican-keystone-listener-54d4bd8c58-swbp6" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.503986 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/20fc7e21-d219-4f68-96f6-5e823c16fde4-config-data-custom\") pod \"barbican-keystone-listener-54d4bd8c58-swbp6\" (UID: \"20fc7e21-d219-4f68-96f6-5e823c16fde4\") " pod="openstack/barbican-keystone-listener-54d4bd8c58-swbp6" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.504055 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-c544fb58c-t65m4"] Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.505841 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-c544fb58c-t65m4" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.510087 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20fc7e21-d219-4f68-96f6-5e823c16fde4-config-data\") pod \"barbican-keystone-listener-54d4bd8c58-swbp6\" (UID: \"20fc7e21-d219-4f68-96f6-5e823c16fde4\") " pod="openstack/barbican-keystone-listener-54d4bd8c58-swbp6" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.516490 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hb59\" (UniqueName: \"kubernetes.io/projected/20fc7e21-d219-4f68-96f6-5e823c16fde4-kube-api-access-5hb59\") pod \"barbican-keystone-listener-54d4bd8c58-swbp6\" (UID: \"20fc7e21-d219-4f68-96f6-5e823c16fde4\") " pod="openstack/barbican-keystone-listener-54d4bd8c58-swbp6" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.516560 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-cn78k"] Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.545971 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-c544fb58c-t65m4"] Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.590788 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f500f20d-afc6-4646-8ed4-59b6897414b3-config-data-custom\") pod \"barbican-worker-57447cd8ff-krgft\" (UID: \"f500f20d-afc6-4646-8ed4-59b6897414b3\") " pod="openstack/barbican-worker-57447cd8ff-krgft" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.591101 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h5l4n\" (UniqueName: \"kubernetes.io/projected/f500f20d-afc6-4646-8ed4-59b6897414b3-kube-api-access-h5l4n\") pod \"barbican-worker-57447cd8ff-krgft\" (UID: \"f500f20d-afc6-4646-8ed4-59b6897414b3\") " pod="openstack/barbican-worker-57447cd8ff-krgft" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.591167 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f500f20d-afc6-4646-8ed4-59b6897414b3-logs\") pod \"barbican-worker-57447cd8ff-krgft\" (UID: \"f500f20d-afc6-4646-8ed4-59b6897414b3\") " 
pod="openstack/barbican-worker-57447cd8ff-krgft" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.591206 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9928q\" (UniqueName: \"kubernetes.io/projected/e2aa9956-23e9-4002-befe-79a9244d2149-kube-api-access-9928q\") pod \"dnsmasq-dns-75c8ddd69c-cn78k\" (UID: \"e2aa9956-23e9-4002-befe-79a9244d2149\") " pod="openstack/dnsmasq-dns-75c8ddd69c-cn78k" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.591335 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e2aa9956-23e9-4002-befe-79a9244d2149-dns-svc\") pod \"dnsmasq-dns-75c8ddd69c-cn78k\" (UID: \"e2aa9956-23e9-4002-befe-79a9244d2149\") " pod="openstack/dnsmasq-dns-75c8ddd69c-cn78k" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.591359 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db6663e2-3aa9-4ddc-8e0a-e4647fccd511-combined-ca-bundle\") pod \"barbican-worker-c544fb58c-t65m4\" (UID: \"db6663e2-3aa9-4ddc-8e0a-e4647fccd511\") " pod="openstack/barbican-worker-c544fb58c-t65m4" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.591442 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/db6663e2-3aa9-4ddc-8e0a-e4647fccd511-config-data-custom\") pod \"barbican-worker-c544fb58c-t65m4\" (UID: \"db6663e2-3aa9-4ddc-8e0a-e4647fccd511\") " pod="openstack/barbican-worker-c544fb58c-t65m4" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.591497 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2mfgb\" (UniqueName: \"kubernetes.io/projected/db6663e2-3aa9-4ddc-8e0a-e4647fccd511-kube-api-access-2mfgb\") pod \"barbican-worker-c544fb58c-t65m4\" (UID: \"db6663e2-3aa9-4ddc-8e0a-e4647fccd511\") " pod="openstack/barbican-worker-c544fb58c-t65m4" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.592204 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e2aa9956-23e9-4002-befe-79a9244d2149-dns-swift-storage-0\") pod \"dnsmasq-dns-75c8ddd69c-cn78k\" (UID: \"e2aa9956-23e9-4002-befe-79a9244d2149\") " pod="openstack/dnsmasq-dns-75c8ddd69c-cn78k" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.592347 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/db6663e2-3aa9-4ddc-8e0a-e4647fccd511-logs\") pod \"barbican-worker-c544fb58c-t65m4\" (UID: \"db6663e2-3aa9-4ddc-8e0a-e4647fccd511\") " pod="openstack/barbican-worker-c544fb58c-t65m4" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.592399 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f500f20d-afc6-4646-8ed4-59b6897414b3-combined-ca-bundle\") pod \"barbican-worker-57447cd8ff-krgft\" (UID: \"f500f20d-afc6-4646-8ed4-59b6897414b3\") " pod="openstack/barbican-worker-57447cd8ff-krgft" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.592426 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/e2aa9956-23e9-4002-befe-79a9244d2149-ovsdbserver-nb\") pod \"dnsmasq-dns-75c8ddd69c-cn78k\" (UID: \"e2aa9956-23e9-4002-befe-79a9244d2149\") " pod="openstack/dnsmasq-dns-75c8ddd69c-cn78k" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.592499 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f500f20d-afc6-4646-8ed4-59b6897414b3-config-data\") pod \"barbican-worker-57447cd8ff-krgft\" (UID: \"f500f20d-afc6-4646-8ed4-59b6897414b3\") " pod="openstack/barbican-worker-57447cd8ff-krgft" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.592522 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e2aa9956-23e9-4002-befe-79a9244d2149-config\") pod \"dnsmasq-dns-75c8ddd69c-cn78k\" (UID: \"e2aa9956-23e9-4002-befe-79a9244d2149\") " pod="openstack/dnsmasq-dns-75c8ddd69c-cn78k" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.592540 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e2aa9956-23e9-4002-befe-79a9244d2149-ovsdbserver-sb\") pod \"dnsmasq-dns-75c8ddd69c-cn78k\" (UID: \"e2aa9956-23e9-4002-befe-79a9244d2149\") " pod="openstack/dnsmasq-dns-75c8ddd69c-cn78k" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.592587 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db6663e2-3aa9-4ddc-8e0a-e4647fccd511-config-data\") pod \"barbican-worker-c544fb58c-t65m4\" (UID: \"db6663e2-3aa9-4ddc-8e0a-e4647fccd511\") " pod="openstack/barbican-worker-c544fb58c-t65m4" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.596127 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f500f20d-afc6-4646-8ed4-59b6897414b3-logs\") pod \"barbican-worker-57447cd8ff-krgft\" (UID: \"f500f20d-afc6-4646-8ed4-59b6897414b3\") " pod="openstack/barbican-worker-57447cd8ff-krgft" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.596291 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-54d8d8c67d-vxfgq"] Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.618492 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-54d8d8c67d-vxfgq" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.627206 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f500f20d-afc6-4646-8ed4-59b6897414b3-combined-ca-bundle\") pod \"barbican-worker-57447cd8ff-krgft\" (UID: \"f500f20d-afc6-4646-8ed4-59b6897414b3\") " pod="openstack/barbican-worker-57447cd8ff-krgft" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.627504 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f500f20d-afc6-4646-8ed4-59b6897414b3-config-data\") pod \"barbican-worker-57447cd8ff-krgft\" (UID: \"f500f20d-afc6-4646-8ed4-59b6897414b3\") " pod="openstack/barbican-worker-57447cd8ff-krgft" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.627591 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f500f20d-afc6-4646-8ed4-59b6897414b3-config-data-custom\") pod \"barbican-worker-57447cd8ff-krgft\" (UID: \"f500f20d-afc6-4646-8ed4-59b6897414b3\") " pod="openstack/barbican-worker-57447cd8ff-krgft" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.638073 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-54d8d8c67d-vxfgq"] Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.649915 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h5l4n\" (UniqueName: \"kubernetes.io/projected/f500f20d-afc6-4646-8ed4-59b6897414b3-kube-api-access-h5l4n\") pod \"barbican-worker-57447cd8ff-krgft\" (UID: \"f500f20d-afc6-4646-8ed4-59b6897414b3\") " pod="openstack/barbican-worker-57447cd8ff-krgft" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.705399 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e2aa9956-23e9-4002-befe-79a9244d2149-dns-swift-storage-0\") pod \"dnsmasq-dns-75c8ddd69c-cn78k\" (UID: \"e2aa9956-23e9-4002-befe-79a9244d2149\") " pod="openstack/dnsmasq-dns-75c8ddd69c-cn78k" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.705634 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/db6663e2-3aa9-4ddc-8e0a-e4647fccd511-logs\") pod \"barbican-worker-c544fb58c-t65m4\" (UID: \"db6663e2-3aa9-4ddc-8e0a-e4647fccd511\") " pod="openstack/barbican-worker-c544fb58c-t65m4" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.705703 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pzpkm\" (UniqueName: \"kubernetes.io/projected/2c364d42-b757-4c65-a010-5db856347830-kube-api-access-pzpkm\") pod \"barbican-keystone-listener-54d8d8c67d-vxfgq\" (UID: \"2c364d42-b757-4c65-a010-5db856347830\") " pod="openstack/barbican-keystone-listener-54d8d8c67d-vxfgq" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.705733 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e2aa9956-23e9-4002-befe-79a9244d2149-ovsdbserver-nb\") pod \"dnsmasq-dns-75c8ddd69c-cn78k\" (UID: \"e2aa9956-23e9-4002-befe-79a9244d2149\") " pod="openstack/dnsmasq-dns-75c8ddd69c-cn78k" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.705820 4791 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e2aa9956-23e9-4002-befe-79a9244d2149-config\") pod \"dnsmasq-dns-75c8ddd69c-cn78k\" (UID: \"e2aa9956-23e9-4002-befe-79a9244d2149\") " pod="openstack/dnsmasq-dns-75c8ddd69c-cn78k" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.705841 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e2aa9956-23e9-4002-befe-79a9244d2149-ovsdbserver-sb\") pod \"dnsmasq-dns-75c8ddd69c-cn78k\" (UID: \"e2aa9956-23e9-4002-befe-79a9244d2149\") " pod="openstack/dnsmasq-dns-75c8ddd69c-cn78k" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.705880 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2c364d42-b757-4c65-a010-5db856347830-config-data-custom\") pod \"barbican-keystone-listener-54d8d8c67d-vxfgq\" (UID: \"2c364d42-b757-4c65-a010-5db856347830\") " pod="openstack/barbican-keystone-listener-54d8d8c67d-vxfgq" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.705931 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db6663e2-3aa9-4ddc-8e0a-e4647fccd511-config-data\") pod \"barbican-worker-c544fb58c-t65m4\" (UID: \"db6663e2-3aa9-4ddc-8e0a-e4647fccd511\") " pod="openstack/barbican-worker-c544fb58c-t65m4" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.705997 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c364d42-b757-4c65-a010-5db856347830-config-data\") pod \"barbican-keystone-listener-54d8d8c67d-vxfgq\" (UID: \"2c364d42-b757-4c65-a010-5db856347830\") " pod="openstack/barbican-keystone-listener-54d8d8c67d-vxfgq" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.706072 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9928q\" (UniqueName: \"kubernetes.io/projected/e2aa9956-23e9-4002-befe-79a9244d2149-kube-api-access-9928q\") pod \"dnsmasq-dns-75c8ddd69c-cn78k\" (UID: \"e2aa9956-23e9-4002-befe-79a9244d2149\") " pod="openstack/dnsmasq-dns-75c8ddd69c-cn78k" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.706079 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-54d4bd8c58-swbp6" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.706248 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db6663e2-3aa9-4ddc-8e0a-e4647fccd511-combined-ca-bundle\") pod \"barbican-worker-c544fb58c-t65m4\" (UID: \"db6663e2-3aa9-4ddc-8e0a-e4647fccd511\") " pod="openstack/barbican-worker-c544fb58c-t65m4" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.706281 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c364d42-b757-4c65-a010-5db856347830-combined-ca-bundle\") pod \"barbican-keystone-listener-54d8d8c67d-vxfgq\" (UID: \"2c364d42-b757-4c65-a010-5db856347830\") " pod="openstack/barbican-keystone-listener-54d8d8c67d-vxfgq" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.706307 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e2aa9956-23e9-4002-befe-79a9244d2149-dns-svc\") pod \"dnsmasq-dns-75c8ddd69c-cn78k\" (UID: \"e2aa9956-23e9-4002-befe-79a9244d2149\") " pod="openstack/dnsmasq-dns-75c8ddd69c-cn78k" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.706333 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/db6663e2-3aa9-4ddc-8e0a-e4647fccd511-config-data-custom\") pod \"barbican-worker-c544fb58c-t65m4\" (UID: \"db6663e2-3aa9-4ddc-8e0a-e4647fccd511\") " pod="openstack/barbican-worker-c544fb58c-t65m4" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.706394 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2mfgb\" (UniqueName: \"kubernetes.io/projected/db6663e2-3aa9-4ddc-8e0a-e4647fccd511-kube-api-access-2mfgb\") pod \"barbican-worker-c544fb58c-t65m4\" (UID: \"db6663e2-3aa9-4ddc-8e0a-e4647fccd511\") " pod="openstack/barbican-worker-c544fb58c-t65m4" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.706437 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c364d42-b757-4c65-a010-5db856347830-logs\") pod \"barbican-keystone-listener-54d8d8c67d-vxfgq\" (UID: \"2c364d42-b757-4c65-a010-5db856347830\") " pod="openstack/barbican-keystone-listener-54d8d8c67d-vxfgq" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.713035 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e2aa9956-23e9-4002-befe-79a9244d2149-dns-svc\") pod \"dnsmasq-dns-75c8ddd69c-cn78k\" (UID: \"e2aa9956-23e9-4002-befe-79a9244d2149\") " pod="openstack/dnsmasq-dns-75c8ddd69c-cn78k" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.719362 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/db6663e2-3aa9-4ddc-8e0a-e4647fccd511-config-data-custom\") pod \"barbican-worker-c544fb58c-t65m4\" (UID: \"db6663e2-3aa9-4ddc-8e0a-e4647fccd511\") " pod="openstack/barbican-worker-c544fb58c-t65m4" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.721073 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e2aa9956-23e9-4002-befe-79a9244d2149-ovsdbserver-nb\") pod 
\"dnsmasq-dns-75c8ddd69c-cn78k\" (UID: \"e2aa9956-23e9-4002-befe-79a9244d2149\") " pod="openstack/dnsmasq-dns-75c8ddd69c-cn78k" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.722311 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e2aa9956-23e9-4002-befe-79a9244d2149-dns-swift-storage-0\") pod \"dnsmasq-dns-75c8ddd69c-cn78k\" (UID: \"e2aa9956-23e9-4002-befe-79a9244d2149\") " pod="openstack/dnsmasq-dns-75c8ddd69c-cn78k" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.722591 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/db6663e2-3aa9-4ddc-8e0a-e4647fccd511-logs\") pod \"barbican-worker-c544fb58c-t65m4\" (UID: \"db6663e2-3aa9-4ddc-8e0a-e4647fccd511\") " pod="openstack/barbican-worker-c544fb58c-t65m4" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.723352 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e2aa9956-23e9-4002-befe-79a9244d2149-ovsdbserver-sb\") pod \"dnsmasq-dns-75c8ddd69c-cn78k\" (UID: \"e2aa9956-23e9-4002-befe-79a9244d2149\") " pod="openstack/dnsmasq-dns-75c8ddd69c-cn78k" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.724919 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e2aa9956-23e9-4002-befe-79a9244d2149-config\") pod \"dnsmasq-dns-75c8ddd69c-cn78k\" (UID: \"e2aa9956-23e9-4002-befe-79a9244d2149\") " pod="openstack/dnsmasq-dns-75c8ddd69c-cn78k" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.725122 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db6663e2-3aa9-4ddc-8e0a-e4647fccd511-config-data\") pod \"barbican-worker-c544fb58c-t65m4\" (UID: \"db6663e2-3aa9-4ddc-8e0a-e4647fccd511\") " pod="openstack/barbican-worker-c544fb58c-t65m4" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.730035 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db6663e2-3aa9-4ddc-8e0a-e4647fccd511-combined-ca-bundle\") pod \"barbican-worker-c544fb58c-t65m4\" (UID: \"db6663e2-3aa9-4ddc-8e0a-e4647fccd511\") " pod="openstack/barbican-worker-c544fb58c-t65m4" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.749948 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-57447cd8ff-krgft" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.758666 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9928q\" (UniqueName: \"kubernetes.io/projected/e2aa9956-23e9-4002-befe-79a9244d2149-kube-api-access-9928q\") pod \"dnsmasq-dns-75c8ddd69c-cn78k\" (UID: \"e2aa9956-23e9-4002-befe-79a9244d2149\") " pod="openstack/dnsmasq-dns-75c8ddd69c-cn78k" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.759210 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-54d9c7b78-mfjzv"] Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.762466 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-54d9c7b78-mfjzv" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.765943 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.775613 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2mfgb\" (UniqueName: \"kubernetes.io/projected/db6663e2-3aa9-4ddc-8e0a-e4647fccd511-kube-api-access-2mfgb\") pod \"barbican-worker-c544fb58c-t65m4\" (UID: \"db6663e2-3aa9-4ddc-8e0a-e4647fccd511\") " pod="openstack/barbican-worker-c544fb58c-t65m4" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.801239 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-54d9c7b78-mfjzv"] Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.807909 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c364d42-b757-4c65-a010-5db856347830-combined-ca-bundle\") pod \"barbican-keystone-listener-54d8d8c67d-vxfgq\" (UID: \"2c364d42-b757-4c65-a010-5db856347830\") " pod="openstack/barbican-keystone-listener-54d8d8c67d-vxfgq" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.807964 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c364d42-b757-4c65-a010-5db856347830-logs\") pod \"barbican-keystone-listener-54d8d8c67d-vxfgq\" (UID: \"2c364d42-b757-4c65-a010-5db856347830\") " pod="openstack/barbican-keystone-listener-54d8d8c67d-vxfgq" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.808010 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c66706d-8e2a-45e8-9722-0666be97569a-logs\") pod \"barbican-api-54d9c7b78-mfjzv\" (UID: \"0c66706d-8e2a-45e8-9722-0666be97569a\") " pod="openstack/barbican-api-54d9c7b78-mfjzv" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.808034 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c66706d-8e2a-45e8-9722-0666be97569a-combined-ca-bundle\") pod \"barbican-api-54d9c7b78-mfjzv\" (UID: \"0c66706d-8e2a-45e8-9722-0666be97569a\") " pod="openstack/barbican-api-54d9c7b78-mfjzv" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.808068 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0c66706d-8e2a-45e8-9722-0666be97569a-config-data-custom\") pod \"barbican-api-54d9c7b78-mfjzv\" (UID: \"0c66706d-8e2a-45e8-9722-0666be97569a\") " pod="openstack/barbican-api-54d9c7b78-mfjzv" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.808121 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pzpkm\" (UniqueName: \"kubernetes.io/projected/2c364d42-b757-4c65-a010-5db856347830-kube-api-access-pzpkm\") pod \"barbican-keystone-listener-54d8d8c67d-vxfgq\" (UID: \"2c364d42-b757-4c65-a010-5db856347830\") " pod="openstack/barbican-keystone-listener-54d8d8c67d-vxfgq" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.808145 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2vtsq\" (UniqueName: 
\"kubernetes.io/projected/0c66706d-8e2a-45e8-9722-0666be97569a-kube-api-access-2vtsq\") pod \"barbican-api-54d9c7b78-mfjzv\" (UID: \"0c66706d-8e2a-45e8-9722-0666be97569a\") " pod="openstack/barbican-api-54d9c7b78-mfjzv" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.808191 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2c364d42-b757-4c65-a010-5db856347830-config-data-custom\") pod \"barbican-keystone-listener-54d8d8c67d-vxfgq\" (UID: \"2c364d42-b757-4c65-a010-5db856347830\") " pod="openstack/barbican-keystone-listener-54d8d8c67d-vxfgq" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.808228 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c364d42-b757-4c65-a010-5db856347830-config-data\") pod \"barbican-keystone-listener-54d8d8c67d-vxfgq\" (UID: \"2c364d42-b757-4c65-a010-5db856347830\") " pod="openstack/barbican-keystone-listener-54d8d8c67d-vxfgq" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.808264 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c66706d-8e2a-45e8-9722-0666be97569a-config-data\") pod \"barbican-api-54d9c7b78-mfjzv\" (UID: \"0c66706d-8e2a-45e8-9722-0666be97569a\") " pod="openstack/barbican-api-54d9c7b78-mfjzv" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.811072 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c364d42-b757-4c65-a010-5db856347830-logs\") pod \"barbican-keystone-listener-54d8d8c67d-vxfgq\" (UID: \"2c364d42-b757-4c65-a010-5db856347830\") " pod="openstack/barbican-keystone-listener-54d8d8c67d-vxfgq" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.813816 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2c364d42-b757-4c65-a010-5db856347830-config-data-custom\") pod \"barbican-keystone-listener-54d8d8c67d-vxfgq\" (UID: \"2c364d42-b757-4c65-a010-5db856347830\") " pod="openstack/barbican-keystone-listener-54d8d8c67d-vxfgq" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.815958 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c364d42-b757-4c65-a010-5db856347830-combined-ca-bundle\") pod \"barbican-keystone-listener-54d8d8c67d-vxfgq\" (UID: \"2c364d42-b757-4c65-a010-5db856347830\") " pod="openstack/barbican-keystone-listener-54d8d8c67d-vxfgq" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.841097 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75c8ddd69c-cn78k" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.857213 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c364d42-b757-4c65-a010-5db856347830-config-data\") pod \"barbican-keystone-listener-54d8d8c67d-vxfgq\" (UID: \"2c364d42-b757-4c65-a010-5db856347830\") " pod="openstack/barbican-keystone-listener-54d8d8c67d-vxfgq" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.863446 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-c544fb58c-t65m4" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.863955 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pzpkm\" (UniqueName: \"kubernetes.io/projected/2c364d42-b757-4c65-a010-5db856347830-kube-api-access-pzpkm\") pod \"barbican-keystone-listener-54d8d8c67d-vxfgq\" (UID: \"2c364d42-b757-4c65-a010-5db856347830\") " pod="openstack/barbican-keystone-listener-54d8d8c67d-vxfgq" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.880869 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-54d8d8c67d-vxfgq" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.909735 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0c66706d-8e2a-45e8-9722-0666be97569a-config-data-custom\") pod \"barbican-api-54d9c7b78-mfjzv\" (UID: \"0c66706d-8e2a-45e8-9722-0666be97569a\") " pod="openstack/barbican-api-54d9c7b78-mfjzv" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.909851 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2vtsq\" (UniqueName: \"kubernetes.io/projected/0c66706d-8e2a-45e8-9722-0666be97569a-kube-api-access-2vtsq\") pod \"barbican-api-54d9c7b78-mfjzv\" (UID: \"0c66706d-8e2a-45e8-9722-0666be97569a\") " pod="openstack/barbican-api-54d9c7b78-mfjzv" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.909956 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c66706d-8e2a-45e8-9722-0666be97569a-config-data\") pod \"barbican-api-54d9c7b78-mfjzv\" (UID: \"0c66706d-8e2a-45e8-9722-0666be97569a\") " pod="openstack/barbican-api-54d9c7b78-mfjzv" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.910107 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c66706d-8e2a-45e8-9722-0666be97569a-logs\") pod \"barbican-api-54d9c7b78-mfjzv\" (UID: \"0c66706d-8e2a-45e8-9722-0666be97569a\") " pod="openstack/barbican-api-54d9c7b78-mfjzv" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.910135 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c66706d-8e2a-45e8-9722-0666be97569a-combined-ca-bundle\") pod \"barbican-api-54d9c7b78-mfjzv\" (UID: \"0c66706d-8e2a-45e8-9722-0666be97569a\") " pod="openstack/barbican-api-54d9c7b78-mfjzv" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.926422 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c66706d-8e2a-45e8-9722-0666be97569a-logs\") pod \"barbican-api-54d9c7b78-mfjzv\" (UID: \"0c66706d-8e2a-45e8-9722-0666be97569a\") " pod="openstack/barbican-api-54d9c7b78-mfjzv" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.926646 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0c66706d-8e2a-45e8-9722-0666be97569a-config-data-custom\") pod \"barbican-api-54d9c7b78-mfjzv\" (UID: \"0c66706d-8e2a-45e8-9722-0666be97569a\") " pod="openstack/barbican-api-54d9c7b78-mfjzv" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.926968 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/0c66706d-8e2a-45e8-9722-0666be97569a-combined-ca-bundle\") pod \"barbican-api-54d9c7b78-mfjzv\" (UID: \"0c66706d-8e2a-45e8-9722-0666be97569a\") " pod="openstack/barbican-api-54d9c7b78-mfjzv" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.949677 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c66706d-8e2a-45e8-9722-0666be97569a-config-data\") pod \"barbican-api-54d9c7b78-mfjzv\" (UID: \"0c66706d-8e2a-45e8-9722-0666be97569a\") " pod="openstack/barbican-api-54d9c7b78-mfjzv" Feb 18 00:57:11 crc kubenswrapper[4791]: I0218 00:57:11.954004 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2vtsq\" (UniqueName: \"kubernetes.io/projected/0c66706d-8e2a-45e8-9722-0666be97569a-kube-api-access-2vtsq\") pod \"barbican-api-54d9c7b78-mfjzv\" (UID: \"0c66706d-8e2a-45e8-9722-0666be97569a\") " pod="openstack/barbican-api-54d9c7b78-mfjzv" Feb 18 00:57:12 crc kubenswrapper[4791]: I0218 00:57:12.147275 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-956c96f98-prlnm" event={"ID":"0a531661-ab4d-4689-8dd6-a1627232f871","Type":"ContainerStarted","Data":"44616c0ca617af27909f2cdd1af40974c7443bf98100ca11a1e3b3c5f50a56f9"} Feb 18 00:57:12 crc kubenswrapper[4791]: I0218 00:57:12.149149 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-956c96f98-prlnm" Feb 18 00:57:12 crc kubenswrapper[4791]: I0218 00:57:12.149197 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-956c96f98-prlnm" Feb 18 00:57:12 crc kubenswrapper[4791]: I0218 00:57:12.172947 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-85bf744856-fnxxz"] Feb 18 00:57:12 crc kubenswrapper[4791]: I0218 00:57:12.197369 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-956c96f98-prlnm" podStartSLOduration=8.197353577 podStartE2EDuration="8.197353577s" podCreationTimestamp="2026-02-18 00:57:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:57:12.194747346 +0000 UTC m=+1373.762760516" watchObservedRunningTime="2026-02-18 00:57:12.197353577 +0000 UTC m=+1373.765366747" Feb 18 00:57:12 crc kubenswrapper[4791]: I0218 00:57:12.228729 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-54d9c7b78-mfjzv" Feb 18 00:57:12 crc kubenswrapper[4791]: I0218 00:57:12.239382 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"88f5c47b-706b-4d5b-9822-56c13e90a7a9","Type":"ContainerStarted","Data":"88ea5f88ed7bfed3311bc6d29fa7c4418d1dd0ebeb5bc50dfe90c18b8aa803a7"} Feb 18 00:57:12 crc kubenswrapper[4791]: I0218 00:57:12.280012 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=9.279997218 podStartE2EDuration="9.279997218s" podCreationTimestamp="2026-02-18 00:57:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:57:12.27845383 +0000 UTC m=+1373.846467000" watchObservedRunningTime="2026-02-18 00:57:12.279997218 +0000 UTC m=+1373.848010378" Feb 18 00:57:12 crc kubenswrapper[4791]: I0218 00:57:12.570995 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-54d4bd8c58-swbp6"] Feb 18 00:57:12 crc kubenswrapper[4791]: I0218 00:57:12.820122 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-57447cd8ff-krgft"] Feb 18 00:57:12 crc kubenswrapper[4791]: W0218 00:57:12.838519 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf500f20d_afc6_4646_8ed4_59b6897414b3.slice/crio-de76ab03bd4057a2d6ed1f8c0d6e09684e1bcb146c88bff0325d5fa616b87181 WatchSource:0}: Error finding container de76ab03bd4057a2d6ed1f8c0d6e09684e1bcb146c88bff0325d5fa616b87181: Status 404 returned error can't find the container with id de76ab03bd4057a2d6ed1f8c0d6e09684e1bcb146c88bff0325d5fa616b87181 Feb 18 00:57:13 crc kubenswrapper[4791]: I0218 00:57:13.041031 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-cn78k"] Feb 18 00:57:13 crc kubenswrapper[4791]: W0218 00:57:13.066492 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode2aa9956_23e9_4002_befe_79a9244d2149.slice/crio-da041084c2e1634edbea31f361e823339fb29a6ab250913fc9bb7d0695d0d44e WatchSource:0}: Error finding container da041084c2e1634edbea31f361e823339fb29a6ab250913fc9bb7d0695d0d44e: Status 404 returned error can't find the container with id da041084c2e1634edbea31f361e823339fb29a6ab250913fc9bb7d0695d0d44e Feb 18 00:57:13 crc kubenswrapper[4791]: I0218 00:57:13.109263 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e359378c-e000-4599-8f4d-e3169c37a22e" path="/var/lib/kubelet/pods/e359378c-e000-4599-8f4d-e3169c37a22e/volumes" Feb 18 00:57:13 crc kubenswrapper[4791]: I0218 00:57:13.256282 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75c8ddd69c-cn78k" event={"ID":"e2aa9956-23e9-4002-befe-79a9244d2149","Type":"ContainerStarted","Data":"da041084c2e1634edbea31f361e823339fb29a6ab250913fc9bb7d0695d0d44e"} Feb 18 00:57:13 crc kubenswrapper[4791]: I0218 00:57:13.258325 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-85bf744856-fnxxz" event={"ID":"ecce6854-dfe6-4480-9248-190d2eacff79","Type":"ContainerStarted","Data":"548bb58491b28ece7b2f2f0a79582bbbd41095993551d3d3a724ed20f9cb8d9b"} Feb 18 00:57:13 crc kubenswrapper[4791]: I0218 00:57:13.258355 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/keystone-85bf744856-fnxxz" event={"ID":"ecce6854-dfe6-4480-9248-190d2eacff79","Type":"ContainerStarted","Data":"0723feae677500330819b9bd3d2b62df7ca6dcc05ce572842f29e0c42ad1409f"} Feb 18 00:57:13 crc kubenswrapper[4791]: I0218 00:57:13.258611 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-85bf744856-fnxxz" Feb 18 00:57:13 crc kubenswrapper[4791]: I0218 00:57:13.270839 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-54d4bd8c58-swbp6" event={"ID":"20fc7e21-d219-4f68-96f6-5e823c16fde4","Type":"ContainerStarted","Data":"73840ef4ff7ba4c9187f9cb2ac603014eddc06f071a2db9daded9ca4b5a2bc79"} Feb 18 00:57:13 crc kubenswrapper[4791]: I0218 00:57:13.280135 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-57447cd8ff-krgft" event={"ID":"f500f20d-afc6-4646-8ed4-59b6897414b3","Type":"ContainerStarted","Data":"de76ab03bd4057a2d6ed1f8c0d6e09684e1bcb146c88bff0325d5fa616b87181"} Feb 18 00:57:13 crc kubenswrapper[4791]: I0218 00:57:13.292523 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-85bf744856-fnxxz" podStartSLOduration=3.292504525 podStartE2EDuration="3.292504525s" podCreationTimestamp="2026-02-18 00:57:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:57:13.28944832 +0000 UTC m=+1374.857461490" watchObservedRunningTime="2026-02-18 00:57:13.292504525 +0000 UTC m=+1374.860517695" Feb 18 00:57:13 crc kubenswrapper[4791]: I0218 00:57:13.363066 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-54d9c7b78-mfjzv"] Feb 18 00:57:13 crc kubenswrapper[4791]: I0218 00:57:13.390636 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-c544fb58c-t65m4"] Feb 18 00:57:13 crc kubenswrapper[4791]: I0218 00:57:13.419358 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-54d8d8c67d-vxfgq"] Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.292624 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-54d9c7b78-mfjzv" event={"ID":"0c66706d-8e2a-45e8-9722-0666be97569a","Type":"ContainerStarted","Data":"ee84c04900e1ef55b00c71be03d9a7a29491b84b8de95bc4cf8beedc65d8385e"} Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.293171 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-54d9c7b78-mfjzv" Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.293183 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-54d9c7b78-mfjzv" event={"ID":"0c66706d-8e2a-45e8-9722-0666be97569a","Type":"ContainerStarted","Data":"e0194aef92cb0fe6fe78a05f1e4c8d53b03669429bc41c364943c1772c344c42"} Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.293193 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-54d9c7b78-mfjzv" Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.293202 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-54d9c7b78-mfjzv" event={"ID":"0c66706d-8e2a-45e8-9722-0666be97569a","Type":"ContainerStarted","Data":"ed039516256c3885491c747f776c3a9f5b331fef3dfbfc6be53a16b3de7cc183"} Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.298691 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/barbican-worker-c544fb58c-t65m4" event={"ID":"db6663e2-3aa9-4ddc-8e0a-e4647fccd511","Type":"ContainerStarted","Data":"74b3e666261d64e08cf2a07a9933a7115ee11b8acc35b7d638040fdefe724c46"} Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.304435 4791 generic.go:334] "Generic (PLEG): container finished" podID="e2aa9956-23e9-4002-befe-79a9244d2149" containerID="634dfe8dfa7637f01ca3c7bd2f831e5f7fbd7a4eca79dbc4e59eda300337eb0c" exitCode=0 Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.304528 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75c8ddd69c-cn78k" event={"ID":"e2aa9956-23e9-4002-befe-79a9244d2149","Type":"ContainerDied","Data":"634dfe8dfa7637f01ca3c7bd2f831e5f7fbd7a4eca79dbc4e59eda300337eb0c"} Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.307075 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-54d8d8c67d-vxfgq" event={"ID":"2c364d42-b757-4c65-a010-5db856347830","Type":"ContainerStarted","Data":"6f377cb9ac102a43ddf824fdea5bdc480ae968d575ddd931a9647836ab15373f"} Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.314088 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-54d9c7b78-mfjzv" podStartSLOduration=3.314071983 podStartE2EDuration="3.314071983s" podCreationTimestamp="2026-02-18 00:57:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:57:14.309698848 +0000 UTC m=+1375.877712018" watchObservedRunningTime="2026-02-18 00:57:14.314071983 +0000 UTC m=+1375.882085153" Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.411385 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.411436 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.561482 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.565983 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.658837 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-654c877dd4-hwg2j"] Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.660703 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-654c877dd4-hwg2j" Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.663467 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.668533 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.674100 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-654c877dd4-hwg2j"] Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.712552 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p49l2\" (UniqueName: \"kubernetes.io/projected/90af684f-c845-4617-a1d0-106ffecccdfc-kube-api-access-p49l2\") pod \"barbican-api-654c877dd4-hwg2j\" (UID: \"90af684f-c845-4617-a1d0-106ffecccdfc\") " pod="openstack/barbican-api-654c877dd4-hwg2j" Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.712603 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/90af684f-c845-4617-a1d0-106ffecccdfc-public-tls-certs\") pod \"barbican-api-654c877dd4-hwg2j\" (UID: \"90af684f-c845-4617-a1d0-106ffecccdfc\") " pod="openstack/barbican-api-654c877dd4-hwg2j" Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.712627 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/90af684f-c845-4617-a1d0-106ffecccdfc-internal-tls-certs\") pod \"barbican-api-654c877dd4-hwg2j\" (UID: \"90af684f-c845-4617-a1d0-106ffecccdfc\") " pod="openstack/barbican-api-654c877dd4-hwg2j" Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.712724 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/90af684f-c845-4617-a1d0-106ffecccdfc-config-data-custom\") pod \"barbican-api-654c877dd4-hwg2j\" (UID: \"90af684f-c845-4617-a1d0-106ffecccdfc\") " pod="openstack/barbican-api-654c877dd4-hwg2j" Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.712759 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90af684f-c845-4617-a1d0-106ffecccdfc-config-data\") pod \"barbican-api-654c877dd4-hwg2j\" (UID: \"90af684f-c845-4617-a1d0-106ffecccdfc\") " pod="openstack/barbican-api-654c877dd4-hwg2j" Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.712779 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/90af684f-c845-4617-a1d0-106ffecccdfc-logs\") pod \"barbican-api-654c877dd4-hwg2j\" (UID: \"90af684f-c845-4617-a1d0-106ffecccdfc\") " pod="openstack/barbican-api-654c877dd4-hwg2j" Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.712871 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90af684f-c845-4617-a1d0-106ffecccdfc-combined-ca-bundle\") pod \"barbican-api-654c877dd4-hwg2j\" (UID: \"90af684f-c845-4617-a1d0-106ffecccdfc\") " pod="openstack/barbican-api-654c877dd4-hwg2j" Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.818469 4791 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90af684f-c845-4617-a1d0-106ffecccdfc-combined-ca-bundle\") pod \"barbican-api-654c877dd4-hwg2j\" (UID: \"90af684f-c845-4617-a1d0-106ffecccdfc\") " pod="openstack/barbican-api-654c877dd4-hwg2j" Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.818575 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p49l2\" (UniqueName: \"kubernetes.io/projected/90af684f-c845-4617-a1d0-106ffecccdfc-kube-api-access-p49l2\") pod \"barbican-api-654c877dd4-hwg2j\" (UID: \"90af684f-c845-4617-a1d0-106ffecccdfc\") " pod="openstack/barbican-api-654c877dd4-hwg2j" Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.818594 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/90af684f-c845-4617-a1d0-106ffecccdfc-public-tls-certs\") pod \"barbican-api-654c877dd4-hwg2j\" (UID: \"90af684f-c845-4617-a1d0-106ffecccdfc\") " pod="openstack/barbican-api-654c877dd4-hwg2j" Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.818617 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/90af684f-c845-4617-a1d0-106ffecccdfc-internal-tls-certs\") pod \"barbican-api-654c877dd4-hwg2j\" (UID: \"90af684f-c845-4617-a1d0-106ffecccdfc\") " pod="openstack/barbican-api-654c877dd4-hwg2j" Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.818680 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/90af684f-c845-4617-a1d0-106ffecccdfc-config-data-custom\") pod \"barbican-api-654c877dd4-hwg2j\" (UID: \"90af684f-c845-4617-a1d0-106ffecccdfc\") " pod="openstack/barbican-api-654c877dd4-hwg2j" Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.818710 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90af684f-c845-4617-a1d0-106ffecccdfc-config-data\") pod \"barbican-api-654c877dd4-hwg2j\" (UID: \"90af684f-c845-4617-a1d0-106ffecccdfc\") " pod="openstack/barbican-api-654c877dd4-hwg2j" Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.818726 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/90af684f-c845-4617-a1d0-106ffecccdfc-logs\") pod \"barbican-api-654c877dd4-hwg2j\" (UID: \"90af684f-c845-4617-a1d0-106ffecccdfc\") " pod="openstack/barbican-api-654c877dd4-hwg2j" Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.819190 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/90af684f-c845-4617-a1d0-106ffecccdfc-logs\") pod \"barbican-api-654c877dd4-hwg2j\" (UID: \"90af684f-c845-4617-a1d0-106ffecccdfc\") " pod="openstack/barbican-api-654c877dd4-hwg2j" Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.825478 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/90af684f-c845-4617-a1d0-106ffecccdfc-public-tls-certs\") pod \"barbican-api-654c877dd4-hwg2j\" (UID: \"90af684f-c845-4617-a1d0-106ffecccdfc\") " pod="openstack/barbican-api-654c877dd4-hwg2j" Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.826020 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" 
(UniqueName: \"kubernetes.io/secret/90af684f-c845-4617-a1d0-106ffecccdfc-config-data-custom\") pod \"barbican-api-654c877dd4-hwg2j\" (UID: \"90af684f-c845-4617-a1d0-106ffecccdfc\") " pod="openstack/barbican-api-654c877dd4-hwg2j" Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.838264 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90af684f-c845-4617-a1d0-106ffecccdfc-config-data\") pod \"barbican-api-654c877dd4-hwg2j\" (UID: \"90af684f-c845-4617-a1d0-106ffecccdfc\") " pod="openstack/barbican-api-654c877dd4-hwg2j" Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.838698 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/90af684f-c845-4617-a1d0-106ffecccdfc-internal-tls-certs\") pod \"barbican-api-654c877dd4-hwg2j\" (UID: \"90af684f-c845-4617-a1d0-106ffecccdfc\") " pod="openstack/barbican-api-654c877dd4-hwg2j" Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.839032 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90af684f-c845-4617-a1d0-106ffecccdfc-combined-ca-bundle\") pod \"barbican-api-654c877dd4-hwg2j\" (UID: \"90af684f-c845-4617-a1d0-106ffecccdfc\") " pod="openstack/barbican-api-654c877dd4-hwg2j" Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.904881 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p49l2\" (UniqueName: \"kubernetes.io/projected/90af684f-c845-4617-a1d0-106ffecccdfc-kube-api-access-p49l2\") pod \"barbican-api-654c877dd4-hwg2j\" (UID: \"90af684f-c845-4617-a1d0-106ffecccdfc\") " pod="openstack/barbican-api-654c877dd4-hwg2j" Feb 18 00:57:14 crc kubenswrapper[4791]: I0218 00:57:14.993760 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-654c877dd4-hwg2j" Feb 18 00:57:15 crc kubenswrapper[4791]: I0218 00:57:15.322463 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Feb 18 00:57:15 crc kubenswrapper[4791]: I0218 00:57:15.322506 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Feb 18 00:57:16 crc kubenswrapper[4791]: I0218 00:57:16.468234 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-654c877dd4-hwg2j"] Feb 18 00:57:16 crc kubenswrapper[4791]: W0218 00:57:16.469716 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod90af684f_c845_4617_a1d0_106ffecccdfc.slice/crio-99308080a9c9e5cf05fe095ae58d67d0907762ada4403517f3cee6ebfb514a2e WatchSource:0}: Error finding container 99308080a9c9e5cf05fe095ae58d67d0907762ada4403517f3cee6ebfb514a2e: Status 404 returned error can't find the container with id 99308080a9c9e5cf05fe095ae58d67d0907762ada4403517f3cee6ebfb514a2e Feb 18 00:57:17 crc kubenswrapper[4791]: I0218 00:57:17.369190 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-c544fb58c-t65m4" event={"ID":"db6663e2-3aa9-4ddc-8e0a-e4647fccd511","Type":"ContainerStarted","Data":"5491d26b5d498e0ecf1041830b4c4ab4f2a52fd7bc1e87210488b01d3a257130"} Feb 18 00:57:17 crc kubenswrapper[4791]: I0218 00:57:17.369437 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-c544fb58c-t65m4" event={"ID":"db6663e2-3aa9-4ddc-8e0a-e4647fccd511","Type":"ContainerStarted","Data":"021916f272f1d6bd3b667c434b09f7c2c8ca2c1f0a6ac590621c1296d367f461"} Feb 18 00:57:17 crc kubenswrapper[4791]: I0218 00:57:17.378751 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75c8ddd69c-cn78k" event={"ID":"e2aa9956-23e9-4002-befe-79a9244d2149","Type":"ContainerStarted","Data":"0c0bb29a8673ee9cb1a702ed6ce7bcebcf8926c9b64eec4852f39bd650a0a95f"} Feb 18 00:57:17 crc kubenswrapper[4791]: I0218 00:57:17.378895 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-75c8ddd69c-cn78k" Feb 18 00:57:17 crc kubenswrapper[4791]: I0218 00:57:17.403751 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-54d8d8c67d-vxfgq" event={"ID":"2c364d42-b757-4c65-a010-5db856347830","Type":"ContainerStarted","Data":"5c4a690256a13c7799aa065a329876fb4e5d6466c0869d55c56b2ef564a4f264"} Feb 18 00:57:17 crc kubenswrapper[4791]: I0218 00:57:17.403796 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-54d8d8c67d-vxfgq" event={"ID":"2c364d42-b757-4c65-a010-5db856347830","Type":"ContainerStarted","Data":"a389c4310f0dd2b3f5683809afddfb7873db24b4e431d6e767ab95dc999449d9"} Feb 18 00:57:17 crc kubenswrapper[4791]: I0218 00:57:17.424164 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-c544fb58c-t65m4" podStartSLOduration=3.952335811 podStartE2EDuration="6.424133621s" podCreationTimestamp="2026-02-18 00:57:11 +0000 UTC" firstStartedPulling="2026-02-18 00:57:13.453809574 +0000 UTC m=+1375.021822744" lastFinishedPulling="2026-02-18 00:57:15.925607384 +0000 UTC m=+1377.493620554" observedRunningTime="2026-02-18 00:57:17.41571838 +0000 UTC m=+1378.983731550" watchObservedRunningTime="2026-02-18 00:57:17.424133621 +0000 UTC m=+1378.992146791" Feb 
18 00:57:17 crc kubenswrapper[4791]: I0218 00:57:17.436818 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-54d4bd8c58-swbp6" event={"ID":"20fc7e21-d219-4f68-96f6-5e823c16fde4","Type":"ContainerStarted","Data":"c46bbf3c6e216e41d39c408c458c51b28e0e4d172a02e1c7b1fe4d777a6ed942"} Feb 18 00:57:17 crc kubenswrapper[4791]: I0218 00:57:17.436862 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-54d4bd8c58-swbp6" event={"ID":"20fc7e21-d219-4f68-96f6-5e823c16fde4","Type":"ContainerStarted","Data":"b70cea53289af40be549b0539d24b881d72ecd7bda3b4c271eb10c4ba2763f03"} Feb 18 00:57:17 crc kubenswrapper[4791]: I0218 00:57:17.467055 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-57447cd8ff-krgft" event={"ID":"f500f20d-afc6-4646-8ed4-59b6897414b3","Type":"ContainerStarted","Data":"90ed4e8dc4679ea6fcf919bb31e3b7a83c8aa65e8912ee66ffd34e54dc37a365"} Feb 18 00:57:17 crc kubenswrapper[4791]: I0218 00:57:17.467097 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-57447cd8ff-krgft" event={"ID":"f500f20d-afc6-4646-8ed4-59b6897414b3","Type":"ContainerStarted","Data":"639e940835e4551da6a4c6a12bf4308ae41111a0d11b66916ddbbe88b718402b"} Feb 18 00:57:17 crc kubenswrapper[4791]: I0218 00:57:17.469352 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-54d8d8c67d-vxfgq" podStartSLOduration=4.06681921 podStartE2EDuration="6.469331862s" podCreationTimestamp="2026-02-18 00:57:11 +0000 UTC" firstStartedPulling="2026-02-18 00:57:13.524459883 +0000 UTC m=+1375.092473053" lastFinishedPulling="2026-02-18 00:57:15.926972535 +0000 UTC m=+1377.494985705" observedRunningTime="2026-02-18 00:57:17.44312482 +0000 UTC m=+1379.011137990" watchObservedRunningTime="2026-02-18 00:57:17.469331862 +0000 UTC m=+1379.037345022" Feb 18 00:57:17 crc kubenswrapper[4791]: I0218 00:57:17.472467 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-75c8ddd69c-cn78k" podStartSLOduration=6.472457399 podStartE2EDuration="6.472457399s" podCreationTimestamp="2026-02-18 00:57:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:57:17.46667373 +0000 UTC m=+1379.034686900" watchObservedRunningTime="2026-02-18 00:57:17.472457399 +0000 UTC m=+1379.040470579" Feb 18 00:57:17 crc kubenswrapper[4791]: I0218 00:57:17.479368 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-654c877dd4-hwg2j" event={"ID":"90af684f-c845-4617-a1d0-106ffecccdfc","Type":"ContainerStarted","Data":"944198a97df00ae29421ee5b4f1ae8caf7a3d82a3df1992e0d37b18569972025"} Feb 18 00:57:17 crc kubenswrapper[4791]: I0218 00:57:17.479416 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-654c877dd4-hwg2j" event={"ID":"90af684f-c845-4617-a1d0-106ffecccdfc","Type":"ContainerStarted","Data":"e2f73208d1dde217ae3ff45a208e79f3f39889f55f6d508f5e43a952840d4584"} Feb 18 00:57:17 crc kubenswrapper[4791]: I0218 00:57:17.479427 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-654c877dd4-hwg2j" event={"ID":"90af684f-c845-4617-a1d0-106ffecccdfc","Type":"ContainerStarted","Data":"99308080a9c9e5cf05fe095ae58d67d0907762ada4403517f3cee6ebfb514a2e"} Feb 18 00:57:17 crc kubenswrapper[4791]: I0218 00:57:17.479830 4791 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openstack/barbican-api-654c877dd4-hwg2j" Feb 18 00:57:17 crc kubenswrapper[4791]: I0218 00:57:17.480427 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-654c877dd4-hwg2j" Feb 18 00:57:17 crc kubenswrapper[4791]: I0218 00:57:17.490677 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-57447cd8ff-krgft"] Feb 18 00:57:17 crc kubenswrapper[4791]: I0218 00:57:17.504025 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-54d4bd8c58-swbp6"] Feb 18 00:57:17 crc kubenswrapper[4791]: I0218 00:57:17.520650 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-54d4bd8c58-swbp6" podStartSLOduration=3.246249491 podStartE2EDuration="6.520627542s" podCreationTimestamp="2026-02-18 00:57:11 +0000 UTC" firstStartedPulling="2026-02-18 00:57:12.651428459 +0000 UTC m=+1374.219441629" lastFinishedPulling="2026-02-18 00:57:15.92580651 +0000 UTC m=+1377.493819680" observedRunningTime="2026-02-18 00:57:17.512529581 +0000 UTC m=+1379.080542751" watchObservedRunningTime="2026-02-18 00:57:17.520627542 +0000 UTC m=+1379.088640712" Feb 18 00:57:17 crc kubenswrapper[4791]: I0218 00:57:17.547102 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-654c877dd4-hwg2j" podStartSLOduration=3.5470850819999997 podStartE2EDuration="3.547085082s" podCreationTimestamp="2026-02-18 00:57:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:57:17.536231925 +0000 UTC m=+1379.104245095" watchObservedRunningTime="2026-02-18 00:57:17.547085082 +0000 UTC m=+1379.115098242" Feb 18 00:57:17 crc kubenswrapper[4791]: I0218 00:57:17.561032 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-57447cd8ff-krgft" podStartSLOduration=3.455703983 podStartE2EDuration="6.561016954s" podCreationTimestamp="2026-02-18 00:57:11 +0000 UTC" firstStartedPulling="2026-02-18 00:57:12.840709735 +0000 UTC m=+1374.408722895" lastFinishedPulling="2026-02-18 00:57:15.946022696 +0000 UTC m=+1377.514035866" observedRunningTime="2026-02-18 00:57:17.558600558 +0000 UTC m=+1379.126613718" watchObservedRunningTime="2026-02-18 00:57:17.561016954 +0000 UTC m=+1379.129030124" Feb 18 00:57:17 crc kubenswrapper[4791]: I0218 00:57:17.960932 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Feb 18 00:57:18 crc kubenswrapper[4791]: I0218 00:57:18.494947 4791 generic.go:334] "Generic (PLEG): container finished" podID="4f39e5ec-0b51-4c0e-9a95-95c3e69163b8" containerID="2ad408151a4680c068a341aaaf8055b47f2bfe9f3d2495c19353d4be7a9abdc1" exitCode=0 Feb 18 00:57:18 crc kubenswrapper[4791]: I0218 00:57:18.494983 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-g5ccz" event={"ID":"4f39e5ec-0b51-4c0e-9a95-95c3e69163b8","Type":"ContainerDied","Data":"2ad408151a4680c068a341aaaf8055b47f2bfe9f3d2495c19353d4be7a9abdc1"} Feb 18 00:57:18 crc kubenswrapper[4791]: I0218 00:57:18.880478 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Feb 18 00:57:19 crc kubenswrapper[4791]: I0218 00:57:19.505091 4791 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/barbican-keystone-listener-54d4bd8c58-swbp6" podUID="20fc7e21-d219-4f68-96f6-5e823c16fde4" containerName="barbican-keystone-listener-log" containerID="cri-o://b70cea53289af40be549b0539d24b881d72ecd7bda3b4c271eb10c4ba2763f03" gracePeriod=30 Feb 18 00:57:19 crc kubenswrapper[4791]: I0218 00:57:19.505137 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-54d4bd8c58-swbp6" podUID="20fc7e21-d219-4f68-96f6-5e823c16fde4" containerName="barbican-keystone-listener" containerID="cri-o://c46bbf3c6e216e41d39c408c458c51b28e0e4d172a02e1c7b1fe4d777a6ed942" gracePeriod=30 Feb 18 00:57:19 crc kubenswrapper[4791]: I0218 00:57:19.505188 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-57447cd8ff-krgft" podUID="f500f20d-afc6-4646-8ed4-59b6897414b3" containerName="barbican-worker" containerID="cri-o://90ed4e8dc4679ea6fcf919bb31e3b7a83c8aa65e8912ee66ffd34e54dc37a365" gracePeriod=30 Feb 18 00:57:19 crc kubenswrapper[4791]: I0218 00:57:19.505204 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-57447cd8ff-krgft" podUID="f500f20d-afc6-4646-8ed4-59b6897414b3" containerName="barbican-worker-log" containerID="cri-o://639e940835e4551da6a4c6a12bf4308ae41111a0d11b66916ddbbe88b718402b" gracePeriod=30 Feb 18 00:57:20 crc kubenswrapper[4791]: I0218 00:57:20.238651 4791 scope.go:117] "RemoveContainer" containerID="657a2047bbfd536c3abb0d5470bcc81b4be955c69fb276302594789ad69fbd41" Feb 18 00:57:20 crc kubenswrapper[4791]: I0218 00:57:20.519050 4791 generic.go:334] "Generic (PLEG): container finished" podID="20fc7e21-d219-4f68-96f6-5e823c16fde4" containerID="c46bbf3c6e216e41d39c408c458c51b28e0e4d172a02e1c7b1fe4d777a6ed942" exitCode=0 Feb 18 00:57:20 crc kubenswrapper[4791]: I0218 00:57:20.519087 4791 generic.go:334] "Generic (PLEG): container finished" podID="20fc7e21-d219-4f68-96f6-5e823c16fde4" containerID="b70cea53289af40be549b0539d24b881d72ecd7bda3b4c271eb10c4ba2763f03" exitCode=143 Feb 18 00:57:20 crc kubenswrapper[4791]: I0218 00:57:20.519142 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-54d4bd8c58-swbp6" event={"ID":"20fc7e21-d219-4f68-96f6-5e823c16fde4","Type":"ContainerDied","Data":"c46bbf3c6e216e41d39c408c458c51b28e0e4d172a02e1c7b1fe4d777a6ed942"} Feb 18 00:57:20 crc kubenswrapper[4791]: I0218 00:57:20.519216 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-54d4bd8c58-swbp6" event={"ID":"20fc7e21-d219-4f68-96f6-5e823c16fde4","Type":"ContainerDied","Data":"b70cea53289af40be549b0539d24b881d72ecd7bda3b4c271eb10c4ba2763f03"} Feb 18 00:57:20 crc kubenswrapper[4791]: I0218 00:57:20.522050 4791 generic.go:334] "Generic (PLEG): container finished" podID="f500f20d-afc6-4646-8ed4-59b6897414b3" containerID="90ed4e8dc4679ea6fcf919bb31e3b7a83c8aa65e8912ee66ffd34e54dc37a365" exitCode=0 Feb 18 00:57:20 crc kubenswrapper[4791]: I0218 00:57:20.522080 4791 generic.go:334] "Generic (PLEG): container finished" podID="f500f20d-afc6-4646-8ed4-59b6897414b3" containerID="639e940835e4551da6a4c6a12bf4308ae41111a0d11b66916ddbbe88b718402b" exitCode=143 Feb 18 00:57:20 crc kubenswrapper[4791]: I0218 00:57:20.522146 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-57447cd8ff-krgft" 
event={"ID":"f500f20d-afc6-4646-8ed4-59b6897414b3","Type":"ContainerDied","Data":"90ed4e8dc4679ea6fcf919bb31e3b7a83c8aa65e8912ee66ffd34e54dc37a365"} Feb 18 00:57:20 crc kubenswrapper[4791]: I0218 00:57:20.522184 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-57447cd8ff-krgft" event={"ID":"f500f20d-afc6-4646-8ed4-59b6897414b3","Type":"ContainerDied","Data":"639e940835e4551da6a4c6a12bf4308ae41111a0d11b66916ddbbe88b718402b"} Feb 18 00:57:20 crc kubenswrapper[4791]: I0218 00:57:20.524822 4791 generic.go:334] "Generic (PLEG): container finished" podID="e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0" containerID="b3da85a3e4cce9809241c52b63808dc7059fd6a7d9d963b4eb4608fc4c3e1399" exitCode=0 Feb 18 00:57:20 crc kubenswrapper[4791]: I0218 00:57:20.524894 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-k7jrj" event={"ID":"e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0","Type":"ContainerDied","Data":"b3da85a3e4cce9809241c52b63808dc7059fd6a7d9d963b4eb4608fc4c3e1399"} Feb 18 00:57:21 crc kubenswrapper[4791]: I0218 00:57:21.843281 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-75c8ddd69c-cn78k" Feb 18 00:57:21 crc kubenswrapper[4791]: I0218 00:57:21.913798 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84b966f6c9-9s7nn"] Feb 18 00:57:21 crc kubenswrapper[4791]: I0218 00:57:21.914117 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-84b966f6c9-9s7nn" podUID="f76d819d-a178-42b5-bc1d-642c8684a05b" containerName="dnsmasq-dns" containerID="cri-o://836d31d22349a128a7ff7e08ea60573c4c2aaa743d22abef5271a32bde71957a" gracePeriod=10 Feb 18 00:57:22 crc kubenswrapper[4791]: E0218 00:57:22.168345 4791 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf76d819d_a178_42b5_bc1d_642c8684a05b.slice/crio-836d31d22349a128a7ff7e08ea60573c4c2aaa743d22abef5271a32bde71957a.scope\": RecentStats: unable to find data in memory cache]" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.406487 4791 scope.go:117] "RemoveContainer" containerID="26dcfd57c538f0c9b3d5b808a47edf73e5d9b1d6c2549a8f9d31eaa585f364d5" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.528433 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-k7jrj" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.568884 4791 generic.go:334] "Generic (PLEG): container finished" podID="f76d819d-a178-42b5-bc1d-642c8684a05b" containerID="836d31d22349a128a7ff7e08ea60573c4c2aaa743d22abef5271a32bde71957a" exitCode=0 Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.568965 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84b966f6c9-9s7nn" event={"ID":"f76d819d-a178-42b5-bc1d-642c8684a05b","Type":"ContainerDied","Data":"836d31d22349a128a7ff7e08ea60573c4c2aaa743d22abef5271a32bde71957a"} Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.575006 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0-etc-machine-id\") pod \"e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0\" (UID: \"e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0\") " Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.575061 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0-config-data\") pod \"e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0\" (UID: \"e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0\") " Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.575218 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4zbxc\" (UniqueName: \"kubernetes.io/projected/e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0-kube-api-access-4zbxc\") pod \"e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0\" (UID: \"e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0\") " Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.575385 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0-scripts\") pod \"e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0\" (UID: \"e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0\") " Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.575403 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0" (UID: "e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.575484 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0-combined-ca-bundle\") pod \"e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0\" (UID: \"e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0\") " Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.575507 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0-db-sync-config-data\") pod \"e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0\" (UID: \"e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0\") " Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.576002 4791 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0-etc-machine-id\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.607748 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-k7jrj" event={"ID":"e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0","Type":"ContainerDied","Data":"3a68de1ab1b03269611951f0ab2b3f7a708a644f28f834d646fbd21b7f0c9f60"} Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.608046 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3a68de1ab1b03269611951f0ab2b3f7a708a644f28f834d646fbd21b7f0c9f60" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.608108 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-k7jrj" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.620115 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0-scripts" (OuterVolumeSpecName: "scripts") pod "e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0" (UID: "e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.623832 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0-kube-api-access-4zbxc" (OuterVolumeSpecName: "kube-api-access-4zbxc") pod "e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0" (UID: "e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0"). InnerVolumeSpecName "kube-api-access-4zbxc". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.629496 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0" (UID: "e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.657032 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0" (UID: "e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.661831 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0-config-data" (OuterVolumeSpecName: "config-data") pod "e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0" (UID: "e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.677020 4791 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.677054 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.677067 4791 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.677077 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.677085 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4zbxc\" (UniqueName: \"kubernetes.io/projected/e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0-kube-api-access-4zbxc\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.847493 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-82pnf"] Feb 18 00:57:22 crc kubenswrapper[4791]: E0218 00:57:22.847963 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0" containerName="cinder-db-sync" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.847975 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0" containerName="cinder-db-sync" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.854147 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0" containerName="cinder-db-sync" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.855545 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5784cf869f-82pnf" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.884096 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/825d75de-0281-4172-8f86-e2c23e4a818a-dns-swift-storage-0\") pod \"dnsmasq-dns-5784cf869f-82pnf\" (UID: \"825d75de-0281-4172-8f86-e2c23e4a818a\") " pod="openstack/dnsmasq-dns-5784cf869f-82pnf" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.884200 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/825d75de-0281-4172-8f86-e2c23e4a818a-ovsdbserver-nb\") pod \"dnsmasq-dns-5784cf869f-82pnf\" (UID: \"825d75de-0281-4172-8f86-e2c23e4a818a\") " pod="openstack/dnsmasq-dns-5784cf869f-82pnf" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.884241 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b46wt\" (UniqueName: \"kubernetes.io/projected/825d75de-0281-4172-8f86-e2c23e4a818a-kube-api-access-b46wt\") pod \"dnsmasq-dns-5784cf869f-82pnf\" (UID: \"825d75de-0281-4172-8f86-e2c23e4a818a\") " pod="openstack/dnsmasq-dns-5784cf869f-82pnf" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.884257 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/825d75de-0281-4172-8f86-e2c23e4a818a-config\") pod \"dnsmasq-dns-5784cf869f-82pnf\" (UID: \"825d75de-0281-4172-8f86-e2c23e4a818a\") " pod="openstack/dnsmasq-dns-5784cf869f-82pnf" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.884304 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/825d75de-0281-4172-8f86-e2c23e4a818a-ovsdbserver-sb\") pod \"dnsmasq-dns-5784cf869f-82pnf\" (UID: \"825d75de-0281-4172-8f86-e2c23e4a818a\") " pod="openstack/dnsmasq-dns-5784cf869f-82pnf" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.884322 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/825d75de-0281-4172-8f86-e2c23e4a818a-dns-svc\") pod \"dnsmasq-dns-5784cf869f-82pnf\" (UID: \"825d75de-0281-4172-8f86-e2c23e4a818a\") " pod="openstack/dnsmasq-dns-5784cf869f-82pnf" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.893584 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.895404 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.904493 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.962249 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-82pnf"] Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.986903 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/825d75de-0281-4172-8f86-e2c23e4a818a-ovsdbserver-nb\") pod \"dnsmasq-dns-5784cf869f-82pnf\" (UID: \"825d75de-0281-4172-8f86-e2c23e4a818a\") " pod="openstack/dnsmasq-dns-5784cf869f-82pnf" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.986973 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9bc24\" (UniqueName: \"kubernetes.io/projected/05cc6249-6250-478f-a6a3-226ebea58d94-kube-api-access-9bc24\") pod \"cinder-scheduler-0\" (UID: \"05cc6249-6250-478f-a6a3-226ebea58d94\") " pod="openstack/cinder-scheduler-0" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.987004 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b46wt\" (UniqueName: \"kubernetes.io/projected/825d75de-0281-4172-8f86-e2c23e4a818a-kube-api-access-b46wt\") pod \"dnsmasq-dns-5784cf869f-82pnf\" (UID: \"825d75de-0281-4172-8f86-e2c23e4a818a\") " pod="openstack/dnsmasq-dns-5784cf869f-82pnf" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.987031 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/825d75de-0281-4172-8f86-e2c23e4a818a-config\") pod \"dnsmasq-dns-5784cf869f-82pnf\" (UID: \"825d75de-0281-4172-8f86-e2c23e4a818a\") " pod="openstack/dnsmasq-dns-5784cf869f-82pnf" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.987055 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05cc6249-6250-478f-a6a3-226ebea58d94-config-data\") pod \"cinder-scheduler-0\" (UID: \"05cc6249-6250-478f-a6a3-226ebea58d94\") " pod="openstack/cinder-scheduler-0" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.987086 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/05cc6249-6250-478f-a6a3-226ebea58d94-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"05cc6249-6250-478f-a6a3-226ebea58d94\") " pod="openstack/cinder-scheduler-0" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.987127 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/825d75de-0281-4172-8f86-e2c23e4a818a-ovsdbserver-sb\") pod \"dnsmasq-dns-5784cf869f-82pnf\" (UID: \"825d75de-0281-4172-8f86-e2c23e4a818a\") " pod="openstack/dnsmasq-dns-5784cf869f-82pnf" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.987167 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/825d75de-0281-4172-8f86-e2c23e4a818a-dns-svc\") pod \"dnsmasq-dns-5784cf869f-82pnf\" (UID: \"825d75de-0281-4172-8f86-e2c23e4a818a\") " pod="openstack/dnsmasq-dns-5784cf869f-82pnf" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 
00:57:22.987275 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/05cc6249-6250-478f-a6a3-226ebea58d94-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"05cc6249-6250-478f-a6a3-226ebea58d94\") " pod="openstack/cinder-scheduler-0" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.988084 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/825d75de-0281-4172-8f86-e2c23e4a818a-config\") pod \"dnsmasq-dns-5784cf869f-82pnf\" (UID: \"825d75de-0281-4172-8f86-e2c23e4a818a\") " pod="openstack/dnsmasq-dns-5784cf869f-82pnf" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.988702 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/825d75de-0281-4172-8f86-e2c23e4a818a-ovsdbserver-nb\") pod \"dnsmasq-dns-5784cf869f-82pnf\" (UID: \"825d75de-0281-4172-8f86-e2c23e4a818a\") " pod="openstack/dnsmasq-dns-5784cf869f-82pnf" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.989536 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/825d75de-0281-4172-8f86-e2c23e4a818a-ovsdbserver-sb\") pod \"dnsmasq-dns-5784cf869f-82pnf\" (UID: \"825d75de-0281-4172-8f86-e2c23e4a818a\") " pod="openstack/dnsmasq-dns-5784cf869f-82pnf" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.989575 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.990125 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/825d75de-0281-4172-8f86-e2c23e4a818a-dns-svc\") pod \"dnsmasq-dns-5784cf869f-82pnf\" (UID: \"825d75de-0281-4172-8f86-e2c23e4a818a\") " pod="openstack/dnsmasq-dns-5784cf869f-82pnf" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.994374 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/05cc6249-6250-478f-a6a3-226ebea58d94-scripts\") pod \"cinder-scheduler-0\" (UID: \"05cc6249-6250-478f-a6a3-226ebea58d94\") " pod="openstack/cinder-scheduler-0" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.994437 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/825d75de-0281-4172-8f86-e2c23e4a818a-dns-swift-storage-0\") pod \"dnsmasq-dns-5784cf869f-82pnf\" (UID: \"825d75de-0281-4172-8f86-e2c23e4a818a\") " pod="openstack/dnsmasq-dns-5784cf869f-82pnf" Feb 18 00:57:22 crc kubenswrapper[4791]: I0218 00:57:22.994510 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05cc6249-6250-478f-a6a3-226ebea58d94-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"05cc6249-6250-478f-a6a3-226ebea58d94\") " pod="openstack/cinder-scheduler-0" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.001603 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/825d75de-0281-4172-8f86-e2c23e4a818a-dns-swift-storage-0\") pod \"dnsmasq-dns-5784cf869f-82pnf\" (UID: \"825d75de-0281-4172-8f86-e2c23e4a818a\") " pod="openstack/dnsmasq-dns-5784cf869f-82pnf" Feb 18 
00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.012993 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b46wt\" (UniqueName: \"kubernetes.io/projected/825d75de-0281-4172-8f86-e2c23e4a818a-kube-api-access-b46wt\") pod \"dnsmasq-dns-5784cf869f-82pnf\" (UID: \"825d75de-0281-4172-8f86-e2c23e4a818a\") " pod="openstack/dnsmasq-dns-5784cf869f-82pnf" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.031650 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.035255 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.039029 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.042669 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.101329 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/501a1664-2954-479d-8f39-476611bddbde-etc-machine-id\") pod \"cinder-api-0\" (UID: \"501a1664-2954-479d-8f39-476611bddbde\") " pod="openstack/cinder-api-0" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.104286 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/05cc6249-6250-478f-a6a3-226ebea58d94-scripts\") pod \"cinder-scheduler-0\" (UID: \"05cc6249-6250-478f-a6a3-226ebea58d94\") " pod="openstack/cinder-scheduler-0" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.104319 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/501a1664-2954-479d-8f39-476611bddbde-config-data\") pod \"cinder-api-0\" (UID: \"501a1664-2954-479d-8f39-476611bddbde\") " pod="openstack/cinder-api-0" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.104523 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05cc6249-6250-478f-a6a3-226ebea58d94-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"05cc6249-6250-478f-a6a3-226ebea58d94\") " pod="openstack/cinder-scheduler-0" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.104723 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9bc24\" (UniqueName: \"kubernetes.io/projected/05cc6249-6250-478f-a6a3-226ebea58d94-kube-api-access-9bc24\") pod \"cinder-scheduler-0\" (UID: \"05cc6249-6250-478f-a6a3-226ebea58d94\") " pod="openstack/cinder-scheduler-0" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.104789 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05cc6249-6250-478f-a6a3-226ebea58d94-config-data\") pod \"cinder-scheduler-0\" (UID: \"05cc6249-6250-478f-a6a3-226ebea58d94\") " pod="openstack/cinder-scheduler-0" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.105144 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/05cc6249-6250-478f-a6a3-226ebea58d94-config-data-custom\") pod \"cinder-scheduler-0\" (UID: 
\"05cc6249-6250-478f-a6a3-226ebea58d94\") " pod="openstack/cinder-scheduler-0" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.105340 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/501a1664-2954-479d-8f39-476611bddbde-scripts\") pod \"cinder-api-0\" (UID: \"501a1664-2954-479d-8f39-476611bddbde\") " pod="openstack/cinder-api-0" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.105522 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/05cc6249-6250-478f-a6a3-226ebea58d94-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"05cc6249-6250-478f-a6a3-226ebea58d94\") " pod="openstack/cinder-scheduler-0" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.105899 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/05cc6249-6250-478f-a6a3-226ebea58d94-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"05cc6249-6250-478f-a6a3-226ebea58d94\") " pod="openstack/cinder-scheduler-0" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.123332 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/05cc6249-6250-478f-a6a3-226ebea58d94-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"05cc6249-6250-478f-a6a3-226ebea58d94\") " pod="openstack/cinder-scheduler-0" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.126118 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/05cc6249-6250-478f-a6a3-226ebea58d94-scripts\") pod \"cinder-scheduler-0\" (UID: \"05cc6249-6250-478f-a6a3-226ebea58d94\") " pod="openstack/cinder-scheduler-0" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.130601 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05cc6249-6250-478f-a6a3-226ebea58d94-config-data\") pod \"cinder-scheduler-0\" (UID: \"05cc6249-6250-478f-a6a3-226ebea58d94\") " pod="openstack/cinder-scheduler-0" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.132287 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05cc6249-6250-478f-a6a3-226ebea58d94-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"05cc6249-6250-478f-a6a3-226ebea58d94\") " pod="openstack/cinder-scheduler-0" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.142710 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9bc24\" (UniqueName: \"kubernetes.io/projected/05cc6249-6250-478f-a6a3-226ebea58d94-kube-api-access-9bc24\") pod \"cinder-scheduler-0\" (UID: \"05cc6249-6250-478f-a6a3-226ebea58d94\") " pod="openstack/cinder-scheduler-0" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.186893 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5784cf869f-82pnf" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.208498 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/501a1664-2954-479d-8f39-476611bddbde-etc-machine-id\") pod \"cinder-api-0\" (UID: \"501a1664-2954-479d-8f39-476611bddbde\") " pod="openstack/cinder-api-0" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.208548 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/501a1664-2954-479d-8f39-476611bddbde-config-data\") pod \"cinder-api-0\" (UID: \"501a1664-2954-479d-8f39-476611bddbde\") " pod="openstack/cinder-api-0" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.208579 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/501a1664-2954-479d-8f39-476611bddbde-config-data-custom\") pod \"cinder-api-0\" (UID: \"501a1664-2954-479d-8f39-476611bddbde\") " pod="openstack/cinder-api-0" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.208632 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/501a1664-2954-479d-8f39-476611bddbde-logs\") pod \"cinder-api-0\" (UID: \"501a1664-2954-479d-8f39-476611bddbde\") " pod="openstack/cinder-api-0" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.208653 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s69kd\" (UniqueName: \"kubernetes.io/projected/501a1664-2954-479d-8f39-476611bddbde-kube-api-access-s69kd\") pod \"cinder-api-0\" (UID: \"501a1664-2954-479d-8f39-476611bddbde\") " pod="openstack/cinder-api-0" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.208699 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/501a1664-2954-479d-8f39-476611bddbde-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"501a1664-2954-479d-8f39-476611bddbde\") " pod="openstack/cinder-api-0" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.208769 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/501a1664-2954-479d-8f39-476611bddbde-scripts\") pod \"cinder-api-0\" (UID: \"501a1664-2954-479d-8f39-476611bddbde\") " pod="openstack/cinder-api-0" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.214264 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/501a1664-2954-479d-8f39-476611bddbde-etc-machine-id\") pod \"cinder-api-0\" (UID: \"501a1664-2954-479d-8f39-476611bddbde\") " pod="openstack/cinder-api-0" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.214822 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/501a1664-2954-479d-8f39-476611bddbde-scripts\") pod \"cinder-api-0\" (UID: \"501a1664-2954-479d-8f39-476611bddbde\") " pod="openstack/cinder-api-0" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.227014 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/501a1664-2954-479d-8f39-476611bddbde-config-data\") pod 
\"cinder-api-0\" (UID: \"501a1664-2954-479d-8f39-476611bddbde\") " pod="openstack/cinder-api-0" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.250721 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.312386 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/501a1664-2954-479d-8f39-476611bddbde-logs\") pod \"cinder-api-0\" (UID: \"501a1664-2954-479d-8f39-476611bddbde\") " pod="openstack/cinder-api-0" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.312434 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s69kd\" (UniqueName: \"kubernetes.io/projected/501a1664-2954-479d-8f39-476611bddbde-kube-api-access-s69kd\") pod \"cinder-api-0\" (UID: \"501a1664-2954-479d-8f39-476611bddbde\") " pod="openstack/cinder-api-0" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.312490 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/501a1664-2954-479d-8f39-476611bddbde-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"501a1664-2954-479d-8f39-476611bddbde\") " pod="openstack/cinder-api-0" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.312647 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/501a1664-2954-479d-8f39-476611bddbde-config-data-custom\") pod \"cinder-api-0\" (UID: \"501a1664-2954-479d-8f39-476611bddbde\") " pod="openstack/cinder-api-0" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.312822 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/501a1664-2954-479d-8f39-476611bddbde-logs\") pod \"cinder-api-0\" (UID: \"501a1664-2954-479d-8f39-476611bddbde\") " pod="openstack/cinder-api-0" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.315768 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/501a1664-2954-479d-8f39-476611bddbde-config-data-custom\") pod \"cinder-api-0\" (UID: \"501a1664-2954-479d-8f39-476611bddbde\") " pod="openstack/cinder-api-0" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.317974 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/501a1664-2954-479d-8f39-476611bddbde-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"501a1664-2954-479d-8f39-476611bddbde\") " pod="openstack/cinder-api-0" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.337772 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s69kd\" (UniqueName: \"kubernetes.io/projected/501a1664-2954-479d-8f39-476611bddbde-kube-api-access-s69kd\") pod \"cinder-api-0\" (UID: \"501a1664-2954-479d-8f39-476611bddbde\") " pod="openstack/cinder-api-0" Feb 18 00:57:23 crc kubenswrapper[4791]: I0218 00:57:23.367690 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Feb 18 00:57:24 crc kubenswrapper[4791]: I0218 00:57:24.345064 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-54d9c7b78-mfjzv" Feb 18 00:57:24 crc kubenswrapper[4791]: I0218 00:57:24.522373 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-g5ccz" Feb 18 00:57:24 crc kubenswrapper[4791]: I0218 00:57:24.553734 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-54d9c7b78-mfjzv" Feb 18 00:57:24 crc kubenswrapper[4791]: I0218 00:57:24.555775 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f39e5ec-0b51-4c0e-9a95-95c3e69163b8-config-data\") pod \"4f39e5ec-0b51-4c0e-9a95-95c3e69163b8\" (UID: \"4f39e5ec-0b51-4c0e-9a95-95c3e69163b8\") " Feb 18 00:57:24 crc kubenswrapper[4791]: I0218 00:57:24.555920 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f39e5ec-0b51-4c0e-9a95-95c3e69163b8-combined-ca-bundle\") pod \"4f39e5ec-0b51-4c0e-9a95-95c3e69163b8\" (UID: \"4f39e5ec-0b51-4c0e-9a95-95c3e69163b8\") " Feb 18 00:57:24 crc kubenswrapper[4791]: I0218 00:57:24.555983 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j8kbn\" (UniqueName: \"kubernetes.io/projected/4f39e5ec-0b51-4c0e-9a95-95c3e69163b8-kube-api-access-j8kbn\") pod \"4f39e5ec-0b51-4c0e-9a95-95c3e69163b8\" (UID: \"4f39e5ec-0b51-4c0e-9a95-95c3e69163b8\") " Feb 18 00:57:24 crc kubenswrapper[4791]: I0218 00:57:24.564641 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f39e5ec-0b51-4c0e-9a95-95c3e69163b8-kube-api-access-j8kbn" (OuterVolumeSpecName: "kube-api-access-j8kbn") pod "4f39e5ec-0b51-4c0e-9a95-95c3e69163b8" (UID: "4f39e5ec-0b51-4c0e-9a95-95c3e69163b8"). InnerVolumeSpecName "kube-api-access-j8kbn". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:57:24 crc kubenswrapper[4791]: I0218 00:57:24.654423 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f39e5ec-0b51-4c0e-9a95-95c3e69163b8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4f39e5ec-0b51-4c0e-9a95-95c3e69163b8" (UID: "4f39e5ec-0b51-4c0e-9a95-95c3e69163b8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:24 crc kubenswrapper[4791]: I0218 00:57:24.659311 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f39e5ec-0b51-4c0e-9a95-95c3e69163b8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:24 crc kubenswrapper[4791]: I0218 00:57:24.659340 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j8kbn\" (UniqueName: \"kubernetes.io/projected/4f39e5ec-0b51-4c0e-9a95-95c3e69163b8-kube-api-access-j8kbn\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:24 crc kubenswrapper[4791]: I0218 00:57:24.685781 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-g5ccz" Feb 18 00:57:24 crc kubenswrapper[4791]: I0218 00:57:24.686022 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-g5ccz" event={"ID":"4f39e5ec-0b51-4c0e-9a95-95c3e69163b8","Type":"ContainerDied","Data":"2800c20b824e4ee5ee52adec5302a11c018aab867e02bed9358b9093de7b90f0"} Feb 18 00:57:24 crc kubenswrapper[4791]: I0218 00:57:24.686058 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2800c20b824e4ee5ee52adec5302a11c018aab867e02bed9358b9093de7b90f0" Feb 18 00:57:24 crc kubenswrapper[4791]: I0218 00:57:24.742475 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f39e5ec-0b51-4c0e-9a95-95c3e69163b8-config-data" (OuterVolumeSpecName: "config-data") pod "4f39e5ec-0b51-4c0e-9a95-95c3e69163b8" (UID: "4f39e5ec-0b51-4c0e-9a95-95c3e69163b8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:24 crc kubenswrapper[4791]: I0218 00:57:24.761773 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f39e5ec-0b51-4c0e-9a95-95c3e69163b8-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:25 crc kubenswrapper[4791]: I0218 00:57:25.259688 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Feb 18 00:57:25 crc kubenswrapper[4791]: I0218 00:57:25.722318 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-8694564946-rt9m5" Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.071177 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5ff469dfbc-w7krb"] Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.071503 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5ff469dfbc-w7krb" podUID="7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352" containerName="neutron-api" containerID="cri-o://a620b3185c82615b52303d4e4281a904fe9288345b93edc95bb400d64fb83874" gracePeriod=30 Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.071556 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5ff469dfbc-w7krb" podUID="7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352" containerName="neutron-httpd" containerID="cri-o://7430a4fec703db85e10c20156d2b511933405b2404b2435e4746ddafbfb84c78" gracePeriod=30 Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.117015 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-8475878fc-kbpdv"] Feb 18 00:57:26 crc kubenswrapper[4791]: E0218 00:57:26.124629 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f39e5ec-0b51-4c0e-9a95-95c3e69163b8" containerName="heat-db-sync" Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.124658 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f39e5ec-0b51-4c0e-9a95-95c3e69163b8" containerName="heat-db-sync" Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.124889 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f39e5ec-0b51-4c0e-9a95-95c3e69163b8" containerName="heat-db-sync" Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.131017 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-8475878fc-kbpdv" Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.139296 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-8475878fc-kbpdv"] Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.176616 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-5ff469dfbc-w7krb" Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.304638 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c75w7\" (UniqueName: \"kubernetes.io/projected/62acb3be-36b9-469a-9714-5e29539324dc-kube-api-access-c75w7\") pod \"neutron-8475878fc-kbpdv\" (UID: \"62acb3be-36b9-469a-9714-5e29539324dc\") " pod="openstack/neutron-8475878fc-kbpdv" Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.305012 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62acb3be-36b9-469a-9714-5e29539324dc-combined-ca-bundle\") pod \"neutron-8475878fc-kbpdv\" (UID: \"62acb3be-36b9-469a-9714-5e29539324dc\") " pod="openstack/neutron-8475878fc-kbpdv" Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.305047 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/62acb3be-36b9-469a-9714-5e29539324dc-public-tls-certs\") pod \"neutron-8475878fc-kbpdv\" (UID: \"62acb3be-36b9-469a-9714-5e29539324dc\") " pod="openstack/neutron-8475878fc-kbpdv" Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.305085 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/62acb3be-36b9-469a-9714-5e29539324dc-internal-tls-certs\") pod \"neutron-8475878fc-kbpdv\" (UID: \"62acb3be-36b9-469a-9714-5e29539324dc\") " pod="openstack/neutron-8475878fc-kbpdv" Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.305105 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/62acb3be-36b9-469a-9714-5e29539324dc-config\") pod \"neutron-8475878fc-kbpdv\" (UID: \"62acb3be-36b9-469a-9714-5e29539324dc\") " pod="openstack/neutron-8475878fc-kbpdv" Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.305201 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/62acb3be-36b9-469a-9714-5e29539324dc-httpd-config\") pod \"neutron-8475878fc-kbpdv\" (UID: \"62acb3be-36b9-469a-9714-5e29539324dc\") " pod="openstack/neutron-8475878fc-kbpdv" Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.305304 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/62acb3be-36b9-469a-9714-5e29539324dc-ovndb-tls-certs\") pod \"neutron-8475878fc-kbpdv\" (UID: \"62acb3be-36b9-469a-9714-5e29539324dc\") " pod="openstack/neutron-8475878fc-kbpdv" Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.411467 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c75w7\" (UniqueName: \"kubernetes.io/projected/62acb3be-36b9-469a-9714-5e29539324dc-kube-api-access-c75w7\") pod \"neutron-8475878fc-kbpdv\" (UID: \"62acb3be-36b9-469a-9714-5e29539324dc\") " 
pod="openstack/neutron-8475878fc-kbpdv" Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.411728 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62acb3be-36b9-469a-9714-5e29539324dc-combined-ca-bundle\") pod \"neutron-8475878fc-kbpdv\" (UID: \"62acb3be-36b9-469a-9714-5e29539324dc\") " pod="openstack/neutron-8475878fc-kbpdv" Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.411822 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/62acb3be-36b9-469a-9714-5e29539324dc-public-tls-certs\") pod \"neutron-8475878fc-kbpdv\" (UID: \"62acb3be-36b9-469a-9714-5e29539324dc\") " pod="openstack/neutron-8475878fc-kbpdv" Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.411929 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/62acb3be-36b9-469a-9714-5e29539324dc-internal-tls-certs\") pod \"neutron-8475878fc-kbpdv\" (UID: \"62acb3be-36b9-469a-9714-5e29539324dc\") " pod="openstack/neutron-8475878fc-kbpdv" Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.412023 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/62acb3be-36b9-469a-9714-5e29539324dc-config\") pod \"neutron-8475878fc-kbpdv\" (UID: \"62acb3be-36b9-469a-9714-5e29539324dc\") " pod="openstack/neutron-8475878fc-kbpdv" Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.412242 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/62acb3be-36b9-469a-9714-5e29539324dc-httpd-config\") pod \"neutron-8475878fc-kbpdv\" (UID: \"62acb3be-36b9-469a-9714-5e29539324dc\") " pod="openstack/neutron-8475878fc-kbpdv" Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.412463 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/62acb3be-36b9-469a-9714-5e29539324dc-ovndb-tls-certs\") pod \"neutron-8475878fc-kbpdv\" (UID: \"62acb3be-36b9-469a-9714-5e29539324dc\") " pod="openstack/neutron-8475878fc-kbpdv" Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.418445 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62acb3be-36b9-469a-9714-5e29539324dc-combined-ca-bundle\") pod \"neutron-8475878fc-kbpdv\" (UID: \"62acb3be-36b9-469a-9714-5e29539324dc\") " pod="openstack/neutron-8475878fc-kbpdv" Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.418543 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/62acb3be-36b9-469a-9714-5e29539324dc-httpd-config\") pod \"neutron-8475878fc-kbpdv\" (UID: \"62acb3be-36b9-469a-9714-5e29539324dc\") " pod="openstack/neutron-8475878fc-kbpdv" Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.418706 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/62acb3be-36b9-469a-9714-5e29539324dc-public-tls-certs\") pod \"neutron-8475878fc-kbpdv\" (UID: \"62acb3be-36b9-469a-9714-5e29539324dc\") " pod="openstack/neutron-8475878fc-kbpdv" Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.420313 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"config\" (UniqueName: \"kubernetes.io/secret/62acb3be-36b9-469a-9714-5e29539324dc-config\") pod \"neutron-8475878fc-kbpdv\" (UID: \"62acb3be-36b9-469a-9714-5e29539324dc\") " pod="openstack/neutron-8475878fc-kbpdv" Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.423821 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/62acb3be-36b9-469a-9714-5e29539324dc-internal-tls-certs\") pod \"neutron-8475878fc-kbpdv\" (UID: \"62acb3be-36b9-469a-9714-5e29539324dc\") " pod="openstack/neutron-8475878fc-kbpdv" Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.425034 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/62acb3be-36b9-469a-9714-5e29539324dc-ovndb-tls-certs\") pod \"neutron-8475878fc-kbpdv\" (UID: \"62acb3be-36b9-469a-9714-5e29539324dc\") " pod="openstack/neutron-8475878fc-kbpdv" Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.452214 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c75w7\" (UniqueName: \"kubernetes.io/projected/62acb3be-36b9-469a-9714-5e29539324dc-kube-api-access-c75w7\") pod \"neutron-8475878fc-kbpdv\" (UID: \"62acb3be-36b9-469a-9714-5e29539324dc\") " pod="openstack/neutron-8475878fc-kbpdv" Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.615751 4791 scope.go:117] "RemoveContainer" containerID="aeeea8e58ee1eebecabfc73135b1433844ac911ecd4b4ea0ef6be8c43773d7d1" Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.750671 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8475878fc-kbpdv" Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.769581 4791 generic.go:334] "Generic (PLEG): container finished" podID="7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352" containerID="7430a4fec703db85e10c20156d2b511933405b2404b2435e4746ddafbfb84c78" exitCode=0 Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.769690 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5ff469dfbc-w7krb" event={"ID":"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352","Type":"ContainerDied","Data":"7430a4fec703db85e10c20156d2b511933405b2404b2435e4746ddafbfb84c78"} Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.798361 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-54d4bd8c58-swbp6" event={"ID":"20fc7e21-d219-4f68-96f6-5e823c16fde4","Type":"ContainerDied","Data":"73840ef4ff7ba4c9187f9cb2ac603014eddc06f071a2db9daded9ca4b5a2bc79"} Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.798402 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="73840ef4ff7ba4c9187f9cb2ac603014eddc06f071a2db9daded9ca4b5a2bc79" Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.813179 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-57447cd8ff-krgft" event={"ID":"f500f20d-afc6-4646-8ed4-59b6897414b3","Type":"ContainerDied","Data":"de76ab03bd4057a2d6ed1f8c0d6e09684e1bcb146c88bff0325d5fa616b87181"} Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.813226 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="de76ab03bd4057a2d6ed1f8c0d6e09684e1bcb146c88bff0325d5fa616b87181" Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.816542 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84b966f6c9-9s7nn" 
event={"ID":"f76d819d-a178-42b5-bc1d-642c8684a05b","Type":"ContainerDied","Data":"58ef14401bbb084b9037696844f75ff4049a7dbebd3006c5e3235257aa7571e0"} Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.816576 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="58ef14401bbb084b9037696844f75ff4049a7dbebd3006c5e3235257aa7571e0" Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.880897 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-57447cd8ff-krgft" Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.895119 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84b966f6c9-9s7nn" Feb 18 00:57:26 crc kubenswrapper[4791]: I0218 00:57:26.896166 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-54d4bd8c58-swbp6" Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.028994 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xq25c\" (UniqueName: \"kubernetes.io/projected/f76d819d-a178-42b5-bc1d-642c8684a05b-kube-api-access-xq25c\") pod \"f76d819d-a178-42b5-bc1d-642c8684a05b\" (UID: \"f76d819d-a178-42b5-bc1d-642c8684a05b\") " Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.029048 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f76d819d-a178-42b5-bc1d-642c8684a05b-dns-swift-storage-0\") pod \"f76d819d-a178-42b5-bc1d-642c8684a05b\" (UID: \"f76d819d-a178-42b5-bc1d-642c8684a05b\") " Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.029089 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20fc7e21-d219-4f68-96f6-5e823c16fde4-config-data\") pod \"20fc7e21-d219-4f68-96f6-5e823c16fde4\" (UID: \"20fc7e21-d219-4f68-96f6-5e823c16fde4\") " Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.029109 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/20fc7e21-d219-4f68-96f6-5e823c16fde4-logs\") pod \"20fc7e21-d219-4f68-96f6-5e823c16fde4\" (UID: \"20fc7e21-d219-4f68-96f6-5e823c16fde4\") " Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.029167 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f76d819d-a178-42b5-bc1d-642c8684a05b-ovsdbserver-sb\") pod \"f76d819d-a178-42b5-bc1d-642c8684a05b\" (UID: \"f76d819d-a178-42b5-bc1d-642c8684a05b\") " Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.029241 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f500f20d-afc6-4646-8ed4-59b6897414b3-config-data\") pod \"f500f20d-afc6-4646-8ed4-59b6897414b3\" (UID: \"f500f20d-afc6-4646-8ed4-59b6897414b3\") " Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.029266 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/20fc7e21-d219-4f68-96f6-5e823c16fde4-config-data-custom\") pod \"20fc7e21-d219-4f68-96f6-5e823c16fde4\" (UID: \"20fc7e21-d219-4f68-96f6-5e823c16fde4\") " Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.029325 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"kube-api-access-5hb59\" (UniqueName: \"kubernetes.io/projected/20fc7e21-d219-4f68-96f6-5e823c16fde4-kube-api-access-5hb59\") pod \"20fc7e21-d219-4f68-96f6-5e823c16fde4\" (UID: \"20fc7e21-d219-4f68-96f6-5e823c16fde4\") " Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.029384 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f500f20d-afc6-4646-8ed4-59b6897414b3-config-data-custom\") pod \"f500f20d-afc6-4646-8ed4-59b6897414b3\" (UID: \"f500f20d-afc6-4646-8ed4-59b6897414b3\") " Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.029436 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f500f20d-afc6-4646-8ed4-59b6897414b3-combined-ca-bundle\") pod \"f500f20d-afc6-4646-8ed4-59b6897414b3\" (UID: \"f500f20d-afc6-4646-8ed4-59b6897414b3\") " Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.029472 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h5l4n\" (UniqueName: \"kubernetes.io/projected/f500f20d-afc6-4646-8ed4-59b6897414b3-kube-api-access-h5l4n\") pod \"f500f20d-afc6-4646-8ed4-59b6897414b3\" (UID: \"f500f20d-afc6-4646-8ed4-59b6897414b3\") " Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.029502 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20fc7e21-d219-4f68-96f6-5e823c16fde4-combined-ca-bundle\") pod \"20fc7e21-d219-4f68-96f6-5e823c16fde4\" (UID: \"20fc7e21-d219-4f68-96f6-5e823c16fde4\") " Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.029529 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f76d819d-a178-42b5-bc1d-642c8684a05b-config\") pod \"f76d819d-a178-42b5-bc1d-642c8684a05b\" (UID: \"f76d819d-a178-42b5-bc1d-642c8684a05b\") " Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.029552 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f76d819d-a178-42b5-bc1d-642c8684a05b-dns-svc\") pod \"f76d819d-a178-42b5-bc1d-642c8684a05b\" (UID: \"f76d819d-a178-42b5-bc1d-642c8684a05b\") " Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.029595 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f500f20d-afc6-4646-8ed4-59b6897414b3-logs\") pod \"f500f20d-afc6-4646-8ed4-59b6897414b3\" (UID: \"f500f20d-afc6-4646-8ed4-59b6897414b3\") " Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.029621 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f76d819d-a178-42b5-bc1d-642c8684a05b-ovsdbserver-nb\") pod \"f76d819d-a178-42b5-bc1d-642c8684a05b\" (UID: \"f76d819d-a178-42b5-bc1d-642c8684a05b\") " Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.031717 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20fc7e21-d219-4f68-96f6-5e823c16fde4-logs" (OuterVolumeSpecName: "logs") pod "20fc7e21-d219-4f68-96f6-5e823c16fde4" (UID: "20fc7e21-d219-4f68-96f6-5e823c16fde4"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.038618 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f500f20d-afc6-4646-8ed4-59b6897414b3-logs" (OuterVolumeSpecName: "logs") pod "f500f20d-afc6-4646-8ed4-59b6897414b3" (UID: "f500f20d-afc6-4646-8ed4-59b6897414b3"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.044657 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20fc7e21-d219-4f68-96f6-5e823c16fde4-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "20fc7e21-d219-4f68-96f6-5e823c16fde4" (UID: "20fc7e21-d219-4f68-96f6-5e823c16fde4"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.044912 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f76d819d-a178-42b5-bc1d-642c8684a05b-kube-api-access-xq25c" (OuterVolumeSpecName: "kube-api-access-xq25c") pod "f76d819d-a178-42b5-bc1d-642c8684a05b" (UID: "f76d819d-a178-42b5-bc1d-642c8684a05b"). InnerVolumeSpecName "kube-api-access-xq25c". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.044926 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f500f20d-afc6-4646-8ed4-59b6897414b3-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "f500f20d-afc6-4646-8ed4-59b6897414b3" (UID: "f500f20d-afc6-4646-8ed4-59b6897414b3"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.045972 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20fc7e21-d219-4f68-96f6-5e823c16fde4-kube-api-access-5hb59" (OuterVolumeSpecName: "kube-api-access-5hb59") pod "20fc7e21-d219-4f68-96f6-5e823c16fde4" (UID: "20fc7e21-d219-4f68-96f6-5e823c16fde4"). InnerVolumeSpecName "kube-api-access-5hb59". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.046649 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f500f20d-afc6-4646-8ed4-59b6897414b3-kube-api-access-h5l4n" (OuterVolumeSpecName: "kube-api-access-h5l4n") pod "f500f20d-afc6-4646-8ed4-59b6897414b3" (UID: "f500f20d-afc6-4646-8ed4-59b6897414b3"). InnerVolumeSpecName "kube-api-access-h5l4n". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.132167 4791 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/20fc7e21-d219-4f68-96f6-5e823c16fde4-config-data-custom\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.132189 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5hb59\" (UniqueName: \"kubernetes.io/projected/20fc7e21-d219-4f68-96f6-5e823c16fde4-kube-api-access-5hb59\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.132200 4791 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f500f20d-afc6-4646-8ed4-59b6897414b3-config-data-custom\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.132210 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h5l4n\" (UniqueName: \"kubernetes.io/projected/f500f20d-afc6-4646-8ed4-59b6897414b3-kube-api-access-h5l4n\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.132218 4791 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f500f20d-afc6-4646-8ed4-59b6897414b3-logs\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.132227 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xq25c\" (UniqueName: \"kubernetes.io/projected/f76d819d-a178-42b5-bc1d-642c8684a05b-kube-api-access-xq25c\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.132235 4791 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/20fc7e21-d219-4f68-96f6-5e823c16fde4-logs\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.189835 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20fc7e21-d219-4f68-96f6-5e823c16fde4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "20fc7e21-d219-4f68-96f6-5e823c16fde4" (UID: "20fc7e21-d219-4f68-96f6-5e823c16fde4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.207650 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20fc7e21-d219-4f68-96f6-5e823c16fde4-config-data" (OuterVolumeSpecName: "config-data") pod "20fc7e21-d219-4f68-96f6-5e823c16fde4" (UID: "20fc7e21-d219-4f68-96f6-5e823c16fde4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.216633 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f76d819d-a178-42b5-bc1d-642c8684a05b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f76d819d-a178-42b5-bc1d-642c8684a05b" (UID: "f76d819d-a178-42b5-bc1d-642c8684a05b"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.218785 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f76d819d-a178-42b5-bc1d-642c8684a05b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f76d819d-a178-42b5-bc1d-642c8684a05b" (UID: "f76d819d-a178-42b5-bc1d-642c8684a05b"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.234263 4791 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f76d819d-a178-42b5-bc1d-642c8684a05b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.234298 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20fc7e21-d219-4f68-96f6-5e823c16fde4-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.234310 4791 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f76d819d-a178-42b5-bc1d-642c8684a05b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.234322 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20fc7e21-d219-4f68-96f6-5e823c16fde4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.240313 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-82pnf"] Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.264178 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f500f20d-afc6-4646-8ed4-59b6897414b3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f500f20d-afc6-4646-8ed4-59b6897414b3" (UID: "f500f20d-afc6-4646-8ed4-59b6897414b3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.276041 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f500f20d-afc6-4646-8ed4-59b6897414b3-config-data" (OuterVolumeSpecName: "config-data") pod "f500f20d-afc6-4646-8ed4-59b6897414b3" (UID: "f500f20d-afc6-4646-8ed4-59b6897414b3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.285790 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f76d819d-a178-42b5-bc1d-642c8684a05b-config" (OuterVolumeSpecName: "config") pod "f76d819d-a178-42b5-bc1d-642c8684a05b" (UID: "f76d819d-a178-42b5-bc1d-642c8684a05b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.308633 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f76d819d-a178-42b5-bc1d-642c8684a05b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f76d819d-a178-42b5-bc1d-642c8684a05b" (UID: "f76d819d-a178-42b5-bc1d-642c8684a05b"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.308954 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f76d819d-a178-42b5-bc1d-642c8684a05b-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "f76d819d-a178-42b5-bc1d-642c8684a05b" (UID: "f76d819d-a178-42b5-bc1d-642c8684a05b"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.339469 4791 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f76d819d-a178-42b5-bc1d-642c8684a05b-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.339503 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f500f20d-afc6-4646-8ed4-59b6897414b3-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.339512 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f500f20d-afc6-4646-8ed4-59b6897414b3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.339524 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f76d819d-a178-42b5-bc1d-642c8684a05b-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.339534 4791 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f76d819d-a178-42b5-bc1d-642c8684a05b-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.392203 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.410370 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.791888 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-8475878fc-kbpdv"] Feb 18 00:57:27 crc kubenswrapper[4791]: W0218 00:57:27.837053 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod62acb3be_36b9_469a_9714_5e29539324dc.slice/crio-b1277f8bb0e67846a29f9a7a34d570d6bb040b70508b3c5cb9fb53d01add0510 WatchSource:0}: Error finding container b1277f8bb0e67846a29f9a7a34d570d6bb040b70508b3c5cb9fb53d01add0510: Status 404 returned error can't find the container with id b1277f8bb0e67846a29f9a7a34d570d6bb040b70508b3c5cb9fb53d01add0510 Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.908285 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"65d7529c-6130-4f99-9212-d13950d08fd5","Type":"ContainerStarted","Data":"07b4df9629c9caaeaed7b8853878691a1ef4bbde72fc1ed90c57c7fa19b1942b"} Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.908481 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="65d7529c-6130-4f99-9212-d13950d08fd5" containerName="ceilometer-central-agent" containerID="cri-o://c10fa45ca056ac80520502873bc5c6375a6d6047634d7f5200e31cc5fd512f48" gracePeriod=30 Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.908909 4791 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.909272 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="65d7529c-6130-4f99-9212-d13950d08fd5" containerName="sg-core" containerID="cri-o://aa476e5d026dcf8aac531d64b46fe9baa0c3e19528ba4c0aaf44c74767c825ee" gracePeriod=30 Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.909275 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="65d7529c-6130-4f99-9212-d13950d08fd5" containerName="proxy-httpd" containerID="cri-o://07b4df9629c9caaeaed7b8853878691a1ef4bbde72fc1ed90c57c7fa19b1942b" gracePeriod=30 Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.909357 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="65d7529c-6130-4f99-9212-d13950d08fd5" containerName="ceilometer-notification-agent" containerID="cri-o://cc15b53040307392520c6bf0afceeeae3fa7b4ede8c4b865650ef1bade4c4e37" gracePeriod=30 Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.917408 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"501a1664-2954-479d-8f39-476611bddbde","Type":"ContainerStarted","Data":"fc613be1cf4ce675b1bcfe0897db0565bf6a781ed9d2a36d4a12b8fcee947e1a"} Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.918509 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"05cc6249-6250-478f-a6a3-226ebea58d94","Type":"ContainerStarted","Data":"c33058911d5cdd321621338b8acc9da374e4f0a264a065420cc19959d1a5b858"} Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.919236 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8475878fc-kbpdv" event={"ID":"62acb3be-36b9-469a-9714-5e29539324dc","Type":"ContainerStarted","Data":"b1277f8bb0e67846a29f9a7a34d570d6bb040b70508b3c5cb9fb53d01add0510"} Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.926880 4791 generic.go:334] "Generic (PLEG): container finished" podID="825d75de-0281-4172-8f86-e2c23e4a818a" containerID="a16214bfe2450f85d637539c5f2856edc73efb431a901036708d6adb857a45f6" exitCode=0 Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.926974 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-57447cd8ff-krgft" Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.927053 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-82pnf" event={"ID":"825d75de-0281-4172-8f86-e2c23e4a818a","Type":"ContainerDied","Data":"a16214bfe2450f85d637539c5f2856edc73efb431a901036708d6adb857a45f6"} Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.927097 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-82pnf" event={"ID":"825d75de-0281-4172-8f86-e2c23e4a818a","Type":"ContainerStarted","Data":"542192bdb996760c36ab61f7f1c5d18795ceb4c2a470340789d62d29897918d5"} Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.928268 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84b966f6c9-9s7nn" Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.928280 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-54d4bd8c58-swbp6" Feb 18 00:57:27 crc kubenswrapper[4791]: I0218 00:57:27.940942 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.470336629 podStartE2EDuration="1m4.94092793s" podCreationTimestamp="2026-02-18 00:56:23 +0000 UTC" firstStartedPulling="2026-02-18 00:56:25.221920641 +0000 UTC m=+1326.789933811" lastFinishedPulling="2026-02-18 00:57:26.692511942 +0000 UTC m=+1388.260525112" observedRunningTime="2026-02-18 00:57:27.940630601 +0000 UTC m=+1389.508643771" watchObservedRunningTime="2026-02-18 00:57:27.94092793 +0000 UTC m=+1389.508941100" Feb 18 00:57:28 crc kubenswrapper[4791]: I0218 00:57:28.077957 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-57447cd8ff-krgft"] Feb 18 00:57:28 crc kubenswrapper[4791]: I0218 00:57:28.100261 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-worker-57447cd8ff-krgft"] Feb 18 00:57:28 crc kubenswrapper[4791]: I0218 00:57:28.119836 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84b966f6c9-9s7nn"] Feb 18 00:57:28 crc kubenswrapper[4791]: I0218 00:57:28.131890 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-84b966f6c9-9s7nn"] Feb 18 00:57:28 crc kubenswrapper[4791]: I0218 00:57:28.144212 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-54d4bd8c58-swbp6"] Feb 18 00:57:28 crc kubenswrapper[4791]: I0218 00:57:28.152588 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-keystone-listener-54d4bd8c58-swbp6"] Feb 18 00:57:28 crc kubenswrapper[4791]: I0218 00:57:28.159181 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-654c877dd4-hwg2j" Feb 18 00:57:28 crc kubenswrapper[4791]: I0218 00:57:28.428762 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-654c877dd4-hwg2j" Feb 18 00:57:28 crc kubenswrapper[4791]: I0218 00:57:28.481905 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-5ff469dfbc-w7krb" podUID="7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352" containerName="neutron-httpd" probeResult="failure" output="Get \"https://10.217.0.198:9696/\": dial tcp 10.217.0.198:9696: connect: connection refused" Feb 18 00:57:28 crc kubenswrapper[4791]: I0218 00:57:28.556255 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-54d9c7b78-mfjzv"] Feb 18 00:57:28 crc kubenswrapper[4791]: I0218 00:57:28.556599 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-54d9c7b78-mfjzv" podUID="0c66706d-8e2a-45e8-9722-0666be97569a" containerName="barbican-api" containerID="cri-o://ee84c04900e1ef55b00c71be03d9a7a29491b84b8de95bc4cf8beedc65d8385e" gracePeriod=30 Feb 18 00:57:28 crc kubenswrapper[4791]: I0218 00:57:28.556965 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-54d9c7b78-mfjzv" podUID="0c66706d-8e2a-45e8-9722-0666be97569a" containerName="barbican-api-log" containerID="cri-o://e0194aef92cb0fe6fe78a05f1e4c8d53b03669429bc41c364943c1772c344c42" gracePeriod=30 Feb 18 00:57:28 crc kubenswrapper[4791]: I0218 00:57:28.974542 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" 
event={"ID":"501a1664-2954-479d-8f39-476611bddbde","Type":"ContainerStarted","Data":"00efe43f524fc6242412c63b8306753df96fc59b2bc81cff3c29dbc6ca5b40e4"} Feb 18 00:57:28 crc kubenswrapper[4791]: I0218 00:57:28.982236 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-82pnf" event={"ID":"825d75de-0281-4172-8f86-e2c23e4a818a","Type":"ContainerStarted","Data":"7fbd46809c4d53a9a7508b40e630426d31eaa2554b9639bf1fc6eb11e186767a"} Feb 18 00:57:28 crc kubenswrapper[4791]: I0218 00:57:28.982298 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5784cf869f-82pnf" Feb 18 00:57:28 crc kubenswrapper[4791]: I0218 00:57:28.986119 4791 generic.go:334] "Generic (PLEG): container finished" podID="7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352" containerID="a620b3185c82615b52303d4e4281a904fe9288345b93edc95bb400d64fb83874" exitCode=0 Feb 18 00:57:28 crc kubenswrapper[4791]: I0218 00:57:28.986194 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5ff469dfbc-w7krb" event={"ID":"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352","Type":"ContainerDied","Data":"a620b3185c82615b52303d4e4281a904fe9288345b93edc95bb400d64fb83874"} Feb 18 00:57:29 crc kubenswrapper[4791]: I0218 00:57:29.008907 4791 generic.go:334] "Generic (PLEG): container finished" podID="0c66706d-8e2a-45e8-9722-0666be97569a" containerID="e0194aef92cb0fe6fe78a05f1e4c8d53b03669429bc41c364943c1772c344c42" exitCode=143 Feb 18 00:57:29 crc kubenswrapper[4791]: I0218 00:57:29.008995 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-54d9c7b78-mfjzv" event={"ID":"0c66706d-8e2a-45e8-9722-0666be97569a","Type":"ContainerDied","Data":"e0194aef92cb0fe6fe78a05f1e4c8d53b03669429bc41c364943c1772c344c42"} Feb 18 00:57:29 crc kubenswrapper[4791]: I0218 00:57:29.010617 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5784cf869f-82pnf" podStartSLOduration=7.010592808 podStartE2EDuration="7.010592808s" podCreationTimestamp="2026-02-18 00:57:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:57:29.009215155 +0000 UTC m=+1390.577228325" watchObservedRunningTime="2026-02-18 00:57:29.010592808 +0000 UTC m=+1390.578605978" Feb 18 00:57:29 crc kubenswrapper[4791]: I0218 00:57:29.014110 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8475878fc-kbpdv" event={"ID":"62acb3be-36b9-469a-9714-5e29539324dc","Type":"ContainerStarted","Data":"76b9ada50c6ee3ed955d0775a3215649389ac0af83d1f3000f6cb7137936b5fe"} Feb 18 00:57:29 crc kubenswrapper[4791]: I0218 00:57:29.018194 4791 generic.go:334] "Generic (PLEG): container finished" podID="65d7529c-6130-4f99-9212-d13950d08fd5" containerID="07b4df9629c9caaeaed7b8853878691a1ef4bbde72fc1ed90c57c7fa19b1942b" exitCode=0 Feb 18 00:57:29 crc kubenswrapper[4791]: I0218 00:57:29.018226 4791 generic.go:334] "Generic (PLEG): container finished" podID="65d7529c-6130-4f99-9212-d13950d08fd5" containerID="aa476e5d026dcf8aac531d64b46fe9baa0c3e19528ba4c0aaf44c74767c825ee" exitCode=2 Feb 18 00:57:29 crc kubenswrapper[4791]: I0218 00:57:29.018234 4791 generic.go:334] "Generic (PLEG): container finished" podID="65d7529c-6130-4f99-9212-d13950d08fd5" containerID="c10fa45ca056ac80520502873bc5c6375a6d6047634d7f5200e31cc5fd512f48" exitCode=0 Feb 18 00:57:29 crc kubenswrapper[4791]: I0218 00:57:29.019414 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ceilometer-0" event={"ID":"65d7529c-6130-4f99-9212-d13950d08fd5","Type":"ContainerDied","Data":"07b4df9629c9caaeaed7b8853878691a1ef4bbde72fc1ed90c57c7fa19b1942b"} Feb 18 00:57:29 crc kubenswrapper[4791]: I0218 00:57:29.019455 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"65d7529c-6130-4f99-9212-d13950d08fd5","Type":"ContainerDied","Data":"aa476e5d026dcf8aac531d64b46fe9baa0c3e19528ba4c0aaf44c74767c825ee"} Feb 18 00:57:29 crc kubenswrapper[4791]: I0218 00:57:29.019465 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"65d7529c-6130-4f99-9212-d13950d08fd5","Type":"ContainerDied","Data":"c10fa45ca056ac80520502873bc5c6375a6d6047634d7f5200e31cc5fd512f48"} Feb 18 00:57:29 crc kubenswrapper[4791]: I0218 00:57:29.080939 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5ff469dfbc-w7krb" Feb 18 00:57:29 crc kubenswrapper[4791]: I0218 00:57:29.081319 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20fc7e21-d219-4f68-96f6-5e823c16fde4" path="/var/lib/kubelet/pods/20fc7e21-d219-4f68-96f6-5e823c16fde4/volumes" Feb 18 00:57:29 crc kubenswrapper[4791]: I0218 00:57:29.081937 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f500f20d-afc6-4646-8ed4-59b6897414b3" path="/var/lib/kubelet/pods/f500f20d-afc6-4646-8ed4-59b6897414b3/volumes" Feb 18 00:57:29 crc kubenswrapper[4791]: I0218 00:57:29.082495 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f76d819d-a178-42b5-bc1d-642c8684a05b" path="/var/lib/kubelet/pods/f76d819d-a178-42b5-bc1d-642c8684a05b/volumes" Feb 18 00:57:29 crc kubenswrapper[4791]: I0218 00:57:29.208037 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-config\") pod \"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352\" (UID: \"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352\") " Feb 18 00:57:29 crc kubenswrapper[4791]: I0218 00:57:29.208102 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tfqvp\" (UniqueName: \"kubernetes.io/projected/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-kube-api-access-tfqvp\") pod \"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352\" (UID: \"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352\") " Feb 18 00:57:29 crc kubenswrapper[4791]: I0218 00:57:29.208865 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-public-tls-certs\") pod \"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352\" (UID: \"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352\") " Feb 18 00:57:29 crc kubenswrapper[4791]: I0218 00:57:29.208901 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-combined-ca-bundle\") pod \"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352\" (UID: \"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352\") " Feb 18 00:57:29 crc kubenswrapper[4791]: I0218 00:57:29.209314 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-httpd-config\") pod \"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352\" (UID: \"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352\") " Feb 18 00:57:29 crc kubenswrapper[4791]: I0218 00:57:29.209345 4791 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-ovndb-tls-certs\") pod \"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352\" (UID: \"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352\") " Feb 18 00:57:29 crc kubenswrapper[4791]: I0218 00:57:29.209400 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-internal-tls-certs\") pod \"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352\" (UID: \"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352\") " Feb 18 00:57:29 crc kubenswrapper[4791]: I0218 00:57:29.216953 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352" (UID: "7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:29 crc kubenswrapper[4791]: I0218 00:57:29.227737 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-kube-api-access-tfqvp" (OuterVolumeSpecName: "kube-api-access-tfqvp") pod "7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352" (UID: "7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352"). InnerVolumeSpecName "kube-api-access-tfqvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:57:29 crc kubenswrapper[4791]: I0218 00:57:29.284774 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352" (UID: "7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:29 crc kubenswrapper[4791]: I0218 00:57:29.311347 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352" (UID: "7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:29 crc kubenswrapper[4791]: I0218 00:57:29.312118 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tfqvp\" (UniqueName: \"kubernetes.io/projected/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-kube-api-access-tfqvp\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:29 crc kubenswrapper[4791]: I0218 00:57:29.312146 4791 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-public-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:29 crc kubenswrapper[4791]: I0218 00:57:29.312167 4791 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-httpd-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:29 crc kubenswrapper[4791]: I0218 00:57:29.312177 4791 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:29 crc kubenswrapper[4791]: I0218 00:57:29.318607 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-config" (OuterVolumeSpecName: "config") pod "7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352" (UID: "7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:29 crc kubenswrapper[4791]: I0218 00:57:29.363623 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352" (UID: "7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:29 crc kubenswrapper[4791]: I0218 00:57:29.381305 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352" (UID: "7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:29 crc kubenswrapper[4791]: I0218 00:57:29.414065 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:29 crc kubenswrapper[4791]: I0218 00:57:29.414100 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:29 crc kubenswrapper[4791]: I0218 00:57:29.414112 4791 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:30 crc kubenswrapper[4791]: I0218 00:57:30.032200 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8475878fc-kbpdv" event={"ID":"62acb3be-36b9-469a-9714-5e29539324dc","Type":"ContainerStarted","Data":"34f3642473055a93c25985799804c5ae89959f61d45a35ac20e091d74032ad59"} Feb 18 00:57:30 crc kubenswrapper[4791]: I0218 00:57:30.032684 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-8475878fc-kbpdv" Feb 18 00:57:30 crc kubenswrapper[4791]: I0218 00:57:30.034680 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"501a1664-2954-479d-8f39-476611bddbde","Type":"ContainerStarted","Data":"7d776564ca4ff6a3a5d993c65419701f79c3d07a8f898b659ae9ce9066fa6f35"} Feb 18 00:57:30 crc kubenswrapper[4791]: I0218 00:57:30.034770 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="501a1664-2954-479d-8f39-476611bddbde" containerName="cinder-api-log" containerID="cri-o://00efe43f524fc6242412c63b8306753df96fc59b2bc81cff3c29dbc6ca5b40e4" gracePeriod=30 Feb 18 00:57:30 crc kubenswrapper[4791]: I0218 00:57:30.034799 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Feb 18 00:57:30 crc kubenswrapper[4791]: I0218 00:57:30.034836 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="501a1664-2954-479d-8f39-476611bddbde" containerName="cinder-api" containerID="cri-o://7d776564ca4ff6a3a5d993c65419701f79c3d07a8f898b659ae9ce9066fa6f35" gracePeriod=30 Feb 18 00:57:30 crc kubenswrapper[4791]: I0218 00:57:30.040474 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"05cc6249-6250-478f-a6a3-226ebea58d94","Type":"ContainerStarted","Data":"723ec48d668b776326a6bcc75a1f688843088515e04ac2935a772006e7a8f3c3"} Feb 18 00:57:30 crc kubenswrapper[4791]: I0218 00:57:30.046100 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5ff469dfbc-w7krb" Feb 18 00:57:30 crc kubenswrapper[4791]: I0218 00:57:30.046089 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5ff469dfbc-w7krb" event={"ID":"7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352","Type":"ContainerDied","Data":"396bc762c002400aa7b56e18187ef4bbbf71ce620b59b82f1c78433a285534c1"} Feb 18 00:57:30 crc kubenswrapper[4791]: I0218 00:57:30.046162 4791 scope.go:117] "RemoveContainer" containerID="7430a4fec703db85e10c20156d2b511933405b2404b2435e4746ddafbfb84c78" Feb 18 00:57:30 crc kubenswrapper[4791]: I0218 00:57:30.076114 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-8475878fc-kbpdv" podStartSLOduration=4.076097707 podStartE2EDuration="4.076097707s" podCreationTimestamp="2026-02-18 00:57:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:57:30.07100762 +0000 UTC m=+1391.639020790" watchObservedRunningTime="2026-02-18 00:57:30.076097707 +0000 UTC m=+1391.644110877" Feb 18 00:57:30 crc kubenswrapper[4791]: I0218 00:57:30.093360 4791 scope.go:117] "RemoveContainer" containerID="a620b3185c82615b52303d4e4281a904fe9288345b93edc95bb400d64fb83874" Feb 18 00:57:30 crc kubenswrapper[4791]: I0218 00:57:30.103846 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=8.103827546 podStartE2EDuration="8.103827546s" podCreationTimestamp="2026-02-18 00:57:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:57:30.093518857 +0000 UTC m=+1391.661532027" watchObservedRunningTime="2026-02-18 00:57:30.103827546 +0000 UTC m=+1391.671840716" Feb 18 00:57:30 crc kubenswrapper[4791]: I0218 00:57:30.123279 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5ff469dfbc-w7krb"] Feb 18 00:57:30 crc kubenswrapper[4791]: I0218 00:57:30.130990 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-5ff469dfbc-w7krb"] Feb 18 00:57:30 crc kubenswrapper[4791]: I0218 00:57:30.312209 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-84b966f6c9-9s7nn" podUID="f76d819d-a178-42b5-bc1d-642c8684a05b" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.196:5353: i/o timeout" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.067149 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.074551 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352" path="/var/lib/kubelet/pods/7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352/volumes" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.076747 4791 generic.go:334] "Generic (PLEG): container finished" podID="501a1664-2954-479d-8f39-476611bddbde" containerID="7d776564ca4ff6a3a5d993c65419701f79c3d07a8f898b659ae9ce9066fa6f35" exitCode=0 Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.076769 4791 generic.go:334] "Generic (PLEG): container finished" podID="501a1664-2954-479d-8f39-476611bddbde" containerID="00efe43f524fc6242412c63b8306753df96fc59b2bc81cff3c29dbc6ca5b40e4" exitCode=143 Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.076801 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"501a1664-2954-479d-8f39-476611bddbde","Type":"ContainerDied","Data":"7d776564ca4ff6a3a5d993c65419701f79c3d07a8f898b659ae9ce9066fa6f35"} Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.076824 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"501a1664-2954-479d-8f39-476611bddbde","Type":"ContainerDied","Data":"00efe43f524fc6242412c63b8306753df96fc59b2bc81cff3c29dbc6ca5b40e4"} Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.076834 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"501a1664-2954-479d-8f39-476611bddbde","Type":"ContainerDied","Data":"fc613be1cf4ce675b1bcfe0897db0565bf6a781ed9d2a36d4a12b8fcee947e1a"} Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.076849 4791 scope.go:117] "RemoveContainer" containerID="7d776564ca4ff6a3a5d993c65419701f79c3d07a8f898b659ae9ce9066fa6f35" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.076953 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.079947 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"05cc6249-6250-478f-a6a3-226ebea58d94","Type":"ContainerStarted","Data":"9cb5f64bcd6c0bdff7e65174a088eaa43b6a5af87316b80273232a0278579ed6"} Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.115704 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=8.217568161 podStartE2EDuration="9.115685123s" podCreationTimestamp="2026-02-18 00:57:22 +0000 UTC" firstStartedPulling="2026-02-18 00:57:27.383292319 +0000 UTC m=+1388.951305489" lastFinishedPulling="2026-02-18 00:57:28.281409281 +0000 UTC m=+1389.849422451" observedRunningTime="2026-02-18 00:57:31.106103006 +0000 UTC m=+1392.674116176" watchObservedRunningTime="2026-02-18 00:57:31.115685123 +0000 UTC m=+1392.683698293" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.124451 4791 scope.go:117] "RemoveContainer" containerID="00efe43f524fc6242412c63b8306753df96fc59b2bc81cff3c29dbc6ca5b40e4" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.146715 4791 scope.go:117] "RemoveContainer" containerID="7d776564ca4ff6a3a5d993c65419701f79c3d07a8f898b659ae9ce9066fa6f35" Feb 18 00:57:31 crc kubenswrapper[4791]: E0218 00:57:31.147068 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d776564ca4ff6a3a5d993c65419701f79c3d07a8f898b659ae9ce9066fa6f35\": container with ID starting with 7d776564ca4ff6a3a5d993c65419701f79c3d07a8f898b659ae9ce9066fa6f35 not found: ID does not exist" containerID="7d776564ca4ff6a3a5d993c65419701f79c3d07a8f898b659ae9ce9066fa6f35" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.147098 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d776564ca4ff6a3a5d993c65419701f79c3d07a8f898b659ae9ce9066fa6f35"} err="failed to get container status \"7d776564ca4ff6a3a5d993c65419701f79c3d07a8f898b659ae9ce9066fa6f35\": rpc error: code = NotFound desc = could not find container \"7d776564ca4ff6a3a5d993c65419701f79c3d07a8f898b659ae9ce9066fa6f35\": container with ID starting with 7d776564ca4ff6a3a5d993c65419701f79c3d07a8f898b659ae9ce9066fa6f35 not found: ID does not exist" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.147119 4791 scope.go:117] "RemoveContainer" containerID="00efe43f524fc6242412c63b8306753df96fc59b2bc81cff3c29dbc6ca5b40e4" Feb 18 00:57:31 crc kubenswrapper[4791]: E0218 00:57:31.147567 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"00efe43f524fc6242412c63b8306753df96fc59b2bc81cff3c29dbc6ca5b40e4\": container with ID starting with 00efe43f524fc6242412c63b8306753df96fc59b2bc81cff3c29dbc6ca5b40e4 not found: ID does not exist" containerID="00efe43f524fc6242412c63b8306753df96fc59b2bc81cff3c29dbc6ca5b40e4" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.147601 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"00efe43f524fc6242412c63b8306753df96fc59b2bc81cff3c29dbc6ca5b40e4"} err="failed to get container status \"00efe43f524fc6242412c63b8306753df96fc59b2bc81cff3c29dbc6ca5b40e4\": rpc error: code = NotFound desc = could not find container \"00efe43f524fc6242412c63b8306753df96fc59b2bc81cff3c29dbc6ca5b40e4\": container with ID starting with 
00efe43f524fc6242412c63b8306753df96fc59b2bc81cff3c29dbc6ca5b40e4 not found: ID does not exist" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.147617 4791 scope.go:117] "RemoveContainer" containerID="7d776564ca4ff6a3a5d993c65419701f79c3d07a8f898b659ae9ce9066fa6f35" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.147783 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d776564ca4ff6a3a5d993c65419701f79c3d07a8f898b659ae9ce9066fa6f35"} err="failed to get container status \"7d776564ca4ff6a3a5d993c65419701f79c3d07a8f898b659ae9ce9066fa6f35\": rpc error: code = NotFound desc = could not find container \"7d776564ca4ff6a3a5d993c65419701f79c3d07a8f898b659ae9ce9066fa6f35\": container with ID starting with 7d776564ca4ff6a3a5d993c65419701f79c3d07a8f898b659ae9ce9066fa6f35 not found: ID does not exist" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.147801 4791 scope.go:117] "RemoveContainer" containerID="00efe43f524fc6242412c63b8306753df96fc59b2bc81cff3c29dbc6ca5b40e4" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.147935 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"00efe43f524fc6242412c63b8306753df96fc59b2bc81cff3c29dbc6ca5b40e4"} err="failed to get container status \"00efe43f524fc6242412c63b8306753df96fc59b2bc81cff3c29dbc6ca5b40e4\": rpc error: code = NotFound desc = could not find container \"00efe43f524fc6242412c63b8306753df96fc59b2bc81cff3c29dbc6ca5b40e4\": container with ID starting with 00efe43f524fc6242412c63b8306753df96fc59b2bc81cff3c29dbc6ca5b40e4 not found: ID does not exist" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.161831 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/501a1664-2954-479d-8f39-476611bddbde-config-data-custom\") pod \"501a1664-2954-479d-8f39-476611bddbde\" (UID: \"501a1664-2954-479d-8f39-476611bddbde\") " Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.161945 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/501a1664-2954-479d-8f39-476611bddbde-config-data\") pod \"501a1664-2954-479d-8f39-476611bddbde\" (UID: \"501a1664-2954-479d-8f39-476611bddbde\") " Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.162035 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/501a1664-2954-479d-8f39-476611bddbde-logs\") pod \"501a1664-2954-479d-8f39-476611bddbde\" (UID: \"501a1664-2954-479d-8f39-476611bddbde\") " Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.162124 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/501a1664-2954-479d-8f39-476611bddbde-combined-ca-bundle\") pod \"501a1664-2954-479d-8f39-476611bddbde\" (UID: \"501a1664-2954-479d-8f39-476611bddbde\") " Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.162233 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/501a1664-2954-479d-8f39-476611bddbde-scripts\") pod \"501a1664-2954-479d-8f39-476611bddbde\" (UID: \"501a1664-2954-479d-8f39-476611bddbde\") " Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.162274 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/501a1664-2954-479d-8f39-476611bddbde-etc-machine-id\") pod \"501a1664-2954-479d-8f39-476611bddbde\" (UID: \"501a1664-2954-479d-8f39-476611bddbde\") " Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.162328 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s69kd\" (UniqueName: \"kubernetes.io/projected/501a1664-2954-479d-8f39-476611bddbde-kube-api-access-s69kd\") pod \"501a1664-2954-479d-8f39-476611bddbde\" (UID: \"501a1664-2954-479d-8f39-476611bddbde\") " Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.162327 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/501a1664-2954-479d-8f39-476611bddbde-logs" (OuterVolumeSpecName: "logs") pod "501a1664-2954-479d-8f39-476611bddbde" (UID: "501a1664-2954-479d-8f39-476611bddbde"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.163306 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/501a1664-2954-479d-8f39-476611bddbde-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "501a1664-2954-479d-8f39-476611bddbde" (UID: "501a1664-2954-479d-8f39-476611bddbde"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.432030 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/501a1664-2954-479d-8f39-476611bddbde-scripts" (OuterVolumeSpecName: "scripts") pod "501a1664-2954-479d-8f39-476611bddbde" (UID: "501a1664-2954-479d-8f39-476611bddbde"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.432151 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/501a1664-2954-479d-8f39-476611bddbde-kube-api-access-s69kd" (OuterVolumeSpecName: "kube-api-access-s69kd") pod "501a1664-2954-479d-8f39-476611bddbde" (UID: "501a1664-2954-479d-8f39-476611bddbde"). InnerVolumeSpecName "kube-api-access-s69kd". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.443662 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/501a1664-2954-479d-8f39-476611bddbde-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "501a1664-2954-479d-8f39-476611bddbde" (UID: "501a1664-2954-479d-8f39-476611bddbde"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.443777 4791 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/501a1664-2954-479d-8f39-476611bddbde-logs\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.443819 4791 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/501a1664-2954-479d-8f39-476611bddbde-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.443841 4791 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/501a1664-2954-479d-8f39-476611bddbde-etc-machine-id\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.443860 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s69kd\" (UniqueName: \"kubernetes.io/projected/501a1664-2954-479d-8f39-476611bddbde-kube-api-access-s69kd\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.481417 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/501a1664-2954-479d-8f39-476611bddbde-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "501a1664-2954-479d-8f39-476611bddbde" (UID: "501a1664-2954-479d-8f39-476611bddbde"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.489093 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/501a1664-2954-479d-8f39-476611bddbde-config-data" (OuterVolumeSpecName: "config-data") pod "501a1664-2954-479d-8f39-476611bddbde" (UID: "501a1664-2954-479d-8f39-476611bddbde"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.547702 4791 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/501a1664-2954-479d-8f39-476611bddbde-config-data-custom\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.548081 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/501a1664-2954-479d-8f39-476611bddbde-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.548094 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/501a1664-2954-479d-8f39-476611bddbde-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.718486 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.730530 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.752385 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Feb 18 00:57:31 crc kubenswrapper[4791]: E0218 00:57:31.752939 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="501a1664-2954-479d-8f39-476611bddbde" containerName="cinder-api" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.752962 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="501a1664-2954-479d-8f39-476611bddbde" containerName="cinder-api" Feb 18 00:57:31 crc kubenswrapper[4791]: E0218 00:57:31.752981 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f76d819d-a178-42b5-bc1d-642c8684a05b" containerName="dnsmasq-dns" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.752989 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="f76d819d-a178-42b5-bc1d-642c8684a05b" containerName="dnsmasq-dns" Feb 18 00:57:31 crc kubenswrapper[4791]: E0218 00:57:31.753003 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f500f20d-afc6-4646-8ed4-59b6897414b3" containerName="barbican-worker" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.753009 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="f500f20d-afc6-4646-8ed4-59b6897414b3" containerName="barbican-worker" Feb 18 00:57:31 crc kubenswrapper[4791]: E0218 00:57:31.753023 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352" containerName="neutron-httpd" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.753029 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352" containerName="neutron-httpd" Feb 18 00:57:31 crc kubenswrapper[4791]: E0218 00:57:31.753046 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352" containerName="neutron-api" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.753052 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352" containerName="neutron-api" Feb 18 00:57:31 crc kubenswrapper[4791]: E0218 00:57:31.753066 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20fc7e21-d219-4f68-96f6-5e823c16fde4" containerName="barbican-keystone-listener" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.753073 4791 
state_mem.go:107] "Deleted CPUSet assignment" podUID="20fc7e21-d219-4f68-96f6-5e823c16fde4" containerName="barbican-keystone-listener" Feb 18 00:57:31 crc kubenswrapper[4791]: E0218 00:57:31.753097 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20fc7e21-d219-4f68-96f6-5e823c16fde4" containerName="barbican-keystone-listener-log" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.753106 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="20fc7e21-d219-4f68-96f6-5e823c16fde4" containerName="barbican-keystone-listener-log" Feb 18 00:57:31 crc kubenswrapper[4791]: E0218 00:57:31.753119 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="501a1664-2954-479d-8f39-476611bddbde" containerName="cinder-api-log" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.753128 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="501a1664-2954-479d-8f39-476611bddbde" containerName="cinder-api-log" Feb 18 00:57:31 crc kubenswrapper[4791]: E0218 00:57:31.753150 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f500f20d-afc6-4646-8ed4-59b6897414b3" containerName="barbican-worker-log" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.753178 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="f500f20d-afc6-4646-8ed4-59b6897414b3" containerName="barbican-worker-log" Feb 18 00:57:31 crc kubenswrapper[4791]: E0218 00:57:31.753205 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f76d819d-a178-42b5-bc1d-642c8684a05b" containerName="init" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.753213 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="f76d819d-a178-42b5-bc1d-642c8684a05b" containerName="init" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.753464 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="20fc7e21-d219-4f68-96f6-5e823c16fde4" containerName="barbican-keystone-listener" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.753485 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="20fc7e21-d219-4f68-96f6-5e823c16fde4" containerName="barbican-keystone-listener-log" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.753501 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="f500f20d-afc6-4646-8ed4-59b6897414b3" containerName="barbican-worker" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.753514 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="f500f20d-afc6-4646-8ed4-59b6897414b3" containerName="barbican-worker-log" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.753525 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="501a1664-2954-479d-8f39-476611bddbde" containerName="cinder-api" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.753541 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="f76d819d-a178-42b5-bc1d-642c8684a05b" containerName="dnsmasq-dns" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.753551 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="501a1664-2954-479d-8f39-476611bddbde" containerName="cinder-api-log" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.753563 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352" containerName="neutron-httpd" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.753573 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b2ac3b4-8a5b-4f89-8d0e-7657ff9d6352" 
containerName="neutron-api" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.755401 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.757795 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.757961 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.772315 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.783369 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.853374 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6bfc0893-2e4f-43da-b675-15687e8a3436-public-tls-certs\") pod \"cinder-api-0\" (UID: \"6bfc0893-2e4f-43da-b675-15687e8a3436\") " pod="openstack/cinder-api-0" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.853432 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-blmrw\" (UniqueName: \"kubernetes.io/projected/6bfc0893-2e4f-43da-b675-15687e8a3436-kube-api-access-blmrw\") pod \"cinder-api-0\" (UID: \"6bfc0893-2e4f-43da-b675-15687e8a3436\") " pod="openstack/cinder-api-0" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.853483 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6bfc0893-2e4f-43da-b675-15687e8a3436-config-data-custom\") pod \"cinder-api-0\" (UID: \"6bfc0893-2e4f-43da-b675-15687e8a3436\") " pod="openstack/cinder-api-0" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.853659 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6bfc0893-2e4f-43da-b675-15687e8a3436-etc-machine-id\") pod \"cinder-api-0\" (UID: \"6bfc0893-2e4f-43da-b675-15687e8a3436\") " pod="openstack/cinder-api-0" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.853763 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6bfc0893-2e4f-43da-b675-15687e8a3436-config-data\") pod \"cinder-api-0\" (UID: \"6bfc0893-2e4f-43da-b675-15687e8a3436\") " pod="openstack/cinder-api-0" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.853822 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6bfc0893-2e4f-43da-b675-15687e8a3436-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"6bfc0893-2e4f-43da-b675-15687e8a3436\") " pod="openstack/cinder-api-0" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.853913 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6bfc0893-2e4f-43da-b675-15687e8a3436-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"6bfc0893-2e4f-43da-b675-15687e8a3436\") " pod="openstack/cinder-api-0" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 
00:57:31.853991 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6bfc0893-2e4f-43da-b675-15687e8a3436-logs\") pod \"cinder-api-0\" (UID: \"6bfc0893-2e4f-43da-b675-15687e8a3436\") " pod="openstack/cinder-api-0" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.854193 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6bfc0893-2e4f-43da-b675-15687e8a3436-scripts\") pod \"cinder-api-0\" (UID: \"6bfc0893-2e4f-43da-b675-15687e8a3436\") " pod="openstack/cinder-api-0" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.955802 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6bfc0893-2e4f-43da-b675-15687e8a3436-etc-machine-id\") pod \"cinder-api-0\" (UID: \"6bfc0893-2e4f-43da-b675-15687e8a3436\") " pod="openstack/cinder-api-0" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.955857 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6bfc0893-2e4f-43da-b675-15687e8a3436-config-data\") pod \"cinder-api-0\" (UID: \"6bfc0893-2e4f-43da-b675-15687e8a3436\") " pod="openstack/cinder-api-0" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.955885 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6bfc0893-2e4f-43da-b675-15687e8a3436-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"6bfc0893-2e4f-43da-b675-15687e8a3436\") " pod="openstack/cinder-api-0" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.955923 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6bfc0893-2e4f-43da-b675-15687e8a3436-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"6bfc0893-2e4f-43da-b675-15687e8a3436\") " pod="openstack/cinder-api-0" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.955938 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6bfc0893-2e4f-43da-b675-15687e8a3436-logs\") pod \"cinder-api-0\" (UID: \"6bfc0893-2e4f-43da-b675-15687e8a3436\") " pod="openstack/cinder-api-0" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.955992 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6bfc0893-2e4f-43da-b675-15687e8a3436-scripts\") pod \"cinder-api-0\" (UID: \"6bfc0893-2e4f-43da-b675-15687e8a3436\") " pod="openstack/cinder-api-0" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.956053 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6bfc0893-2e4f-43da-b675-15687e8a3436-public-tls-certs\") pod \"cinder-api-0\" (UID: \"6bfc0893-2e4f-43da-b675-15687e8a3436\") " pod="openstack/cinder-api-0" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.956081 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-blmrw\" (UniqueName: \"kubernetes.io/projected/6bfc0893-2e4f-43da-b675-15687e8a3436-kube-api-access-blmrw\") pod \"cinder-api-0\" (UID: \"6bfc0893-2e4f-43da-b675-15687e8a3436\") " pod="openstack/cinder-api-0" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 
00:57:31.956106 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6bfc0893-2e4f-43da-b675-15687e8a3436-config-data-custom\") pod \"cinder-api-0\" (UID: \"6bfc0893-2e4f-43da-b675-15687e8a3436\") " pod="openstack/cinder-api-0" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.958271 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6bfc0893-2e4f-43da-b675-15687e8a3436-etc-machine-id\") pod \"cinder-api-0\" (UID: \"6bfc0893-2e4f-43da-b675-15687e8a3436\") " pod="openstack/cinder-api-0" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.959792 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6bfc0893-2e4f-43da-b675-15687e8a3436-logs\") pod \"cinder-api-0\" (UID: \"6bfc0893-2e4f-43da-b675-15687e8a3436\") " pod="openstack/cinder-api-0" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.971409 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6bfc0893-2e4f-43da-b675-15687e8a3436-public-tls-certs\") pod \"cinder-api-0\" (UID: \"6bfc0893-2e4f-43da-b675-15687e8a3436\") " pod="openstack/cinder-api-0" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.972212 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6bfc0893-2e4f-43da-b675-15687e8a3436-scripts\") pod \"cinder-api-0\" (UID: \"6bfc0893-2e4f-43da-b675-15687e8a3436\") " pod="openstack/cinder-api-0" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.984395 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-blmrw\" (UniqueName: \"kubernetes.io/projected/6bfc0893-2e4f-43da-b675-15687e8a3436-kube-api-access-blmrw\") pod \"cinder-api-0\" (UID: \"6bfc0893-2e4f-43da-b675-15687e8a3436\") " pod="openstack/cinder-api-0" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.984494 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6bfc0893-2e4f-43da-b675-15687e8a3436-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"6bfc0893-2e4f-43da-b675-15687e8a3436\") " pod="openstack/cinder-api-0" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.985265 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6bfc0893-2e4f-43da-b675-15687e8a3436-config-data\") pod \"cinder-api-0\" (UID: \"6bfc0893-2e4f-43da-b675-15687e8a3436\") " pod="openstack/cinder-api-0" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.985845 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6bfc0893-2e4f-43da-b675-15687e8a3436-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"6bfc0893-2e4f-43da-b675-15687e8a3436\") " pod="openstack/cinder-api-0" Feb 18 00:57:31 crc kubenswrapper[4791]: I0218 00:57:31.992920 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6bfc0893-2e4f-43da-b675-15687e8a3436-config-data-custom\") pod \"cinder-api-0\" (UID: \"6bfc0893-2e4f-43da-b675-15687e8a3436\") " pod="openstack/cinder-api-0" Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.071730 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.112841 4791 generic.go:334] "Generic (PLEG): container finished" podID="65d7529c-6130-4f99-9212-d13950d08fd5" containerID="cc15b53040307392520c6bf0afceeeae3fa7b4ede8c4b865650ef1bade4c4e37" exitCode=0 Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.112904 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"65d7529c-6130-4f99-9212-d13950d08fd5","Type":"ContainerDied","Data":"cc15b53040307392520c6bf0afceeeae3fa7b4ede8c4b865650ef1bade4c4e37"} Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.230094 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-54d9c7b78-mfjzv" podUID="0c66706d-8e2a-45e8-9722-0666be97569a" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.207:9311/healthcheck\": dial tcp 10.217.0.207:9311: connect: connection refused" Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.234878 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-54d9c7b78-mfjzv" podUID="0c66706d-8e2a-45e8-9722-0666be97569a" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.207:9311/healthcheck\": dial tcp 10.217.0.207:9311: connect: connection refused" Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.327902 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.475995 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/65d7529c-6130-4f99-9212-d13950d08fd5-sg-core-conf-yaml\") pod \"65d7529c-6130-4f99-9212-d13950d08fd5\" (UID: \"65d7529c-6130-4f99-9212-d13950d08fd5\") " Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.476061 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65d7529c-6130-4f99-9212-d13950d08fd5-config-data\") pod \"65d7529c-6130-4f99-9212-d13950d08fd5\" (UID: \"65d7529c-6130-4f99-9212-d13950d08fd5\") " Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.476088 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/65d7529c-6130-4f99-9212-d13950d08fd5-run-httpd\") pod \"65d7529c-6130-4f99-9212-d13950d08fd5\" (UID: \"65d7529c-6130-4f99-9212-d13950d08fd5\") " Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.476199 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sznfl\" (UniqueName: \"kubernetes.io/projected/65d7529c-6130-4f99-9212-d13950d08fd5-kube-api-access-sznfl\") pod \"65d7529c-6130-4f99-9212-d13950d08fd5\" (UID: \"65d7529c-6130-4f99-9212-d13950d08fd5\") " Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.476326 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65d7529c-6130-4f99-9212-d13950d08fd5-scripts\") pod \"65d7529c-6130-4f99-9212-d13950d08fd5\" (UID: \"65d7529c-6130-4f99-9212-d13950d08fd5\") " Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.476415 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65d7529c-6130-4f99-9212-d13950d08fd5-combined-ca-bundle\") pod 
\"65d7529c-6130-4f99-9212-d13950d08fd5\" (UID: \"65d7529c-6130-4f99-9212-d13950d08fd5\") " Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.476487 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/65d7529c-6130-4f99-9212-d13950d08fd5-log-httpd\") pod \"65d7529c-6130-4f99-9212-d13950d08fd5\" (UID: \"65d7529c-6130-4f99-9212-d13950d08fd5\") " Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.476948 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/65d7529c-6130-4f99-9212-d13950d08fd5-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "65d7529c-6130-4f99-9212-d13950d08fd5" (UID: "65d7529c-6130-4f99-9212-d13950d08fd5"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.477590 4791 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/65d7529c-6130-4f99-9212-d13950d08fd5-run-httpd\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.478678 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/65d7529c-6130-4f99-9212-d13950d08fd5-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "65d7529c-6130-4f99-9212-d13950d08fd5" (UID: "65d7529c-6130-4f99-9212-d13950d08fd5"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.491604 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65d7529c-6130-4f99-9212-d13950d08fd5-scripts" (OuterVolumeSpecName: "scripts") pod "65d7529c-6130-4f99-9212-d13950d08fd5" (UID: "65d7529c-6130-4f99-9212-d13950d08fd5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.491710 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/65d7529c-6130-4f99-9212-d13950d08fd5-kube-api-access-sznfl" (OuterVolumeSpecName: "kube-api-access-sznfl") pod "65d7529c-6130-4f99-9212-d13950d08fd5" (UID: "65d7529c-6130-4f99-9212-d13950d08fd5"). InnerVolumeSpecName "kube-api-access-sznfl". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.522402 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65d7529c-6130-4f99-9212-d13950d08fd5-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "65d7529c-6130-4f99-9212-d13950d08fd5" (UID: "65d7529c-6130-4f99-9212-d13950d08fd5"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.582498 4791 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/65d7529c-6130-4f99-9212-d13950d08fd5-log-httpd\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.582538 4791 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/65d7529c-6130-4f99-9212-d13950d08fd5-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.582550 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sznfl\" (UniqueName: \"kubernetes.io/projected/65d7529c-6130-4f99-9212-d13950d08fd5-kube-api-access-sznfl\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.582559 4791 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65d7529c-6130-4f99-9212-d13950d08fd5-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.620647 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.622788 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65d7529c-6130-4f99-9212-d13950d08fd5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "65d7529c-6130-4f99-9212-d13950d08fd5" (UID: "65d7529c-6130-4f99-9212-d13950d08fd5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:32 crc kubenswrapper[4791]: W0218 00:57:32.624564 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6bfc0893_2e4f_43da_b675_15687e8a3436.slice/crio-a8fa5d6de44a57a397ffed5f1f25b4eff0cbcd1ae64f8647f9b76a2d246b4c70 WatchSource:0}: Error finding container a8fa5d6de44a57a397ffed5f1f25b4eff0cbcd1ae64f8647f9b76a2d246b4c70: Status 404 returned error can't find the container with id a8fa5d6de44a57a397ffed5f1f25b4eff0cbcd1ae64f8647f9b76a2d246b4c70 Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.654495 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65d7529c-6130-4f99-9212-d13950d08fd5-config-data" (OuterVolumeSpecName: "config-data") pod "65d7529c-6130-4f99-9212-d13950d08fd5" (UID: "65d7529c-6130-4f99-9212-d13950d08fd5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.684750 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65d7529c-6130-4f99-9212-d13950d08fd5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.684779 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65d7529c-6130-4f99-9212-d13950d08fd5-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.771277 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-54d9c7b78-mfjzv" Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.910085 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c66706d-8e2a-45e8-9722-0666be97569a-config-data\") pod \"0c66706d-8e2a-45e8-9722-0666be97569a\" (UID: \"0c66706d-8e2a-45e8-9722-0666be97569a\") " Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.910269 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c66706d-8e2a-45e8-9722-0666be97569a-combined-ca-bundle\") pod \"0c66706d-8e2a-45e8-9722-0666be97569a\" (UID: \"0c66706d-8e2a-45e8-9722-0666be97569a\") " Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.910310 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0c66706d-8e2a-45e8-9722-0666be97569a-config-data-custom\") pod \"0c66706d-8e2a-45e8-9722-0666be97569a\" (UID: \"0c66706d-8e2a-45e8-9722-0666be97569a\") " Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.910485 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c66706d-8e2a-45e8-9722-0666be97569a-logs\") pod \"0c66706d-8e2a-45e8-9722-0666be97569a\" (UID: \"0c66706d-8e2a-45e8-9722-0666be97569a\") " Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.910568 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2vtsq\" (UniqueName: \"kubernetes.io/projected/0c66706d-8e2a-45e8-9722-0666be97569a-kube-api-access-2vtsq\") pod \"0c66706d-8e2a-45e8-9722-0666be97569a\" (UID: \"0c66706d-8e2a-45e8-9722-0666be97569a\") " Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.910972 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0c66706d-8e2a-45e8-9722-0666be97569a-logs" (OuterVolumeSpecName: "logs") pod "0c66706d-8e2a-45e8-9722-0666be97569a" (UID: "0c66706d-8e2a-45e8-9722-0666be97569a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.911354 4791 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c66706d-8e2a-45e8-9722-0666be97569a-logs\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.914748 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c66706d-8e2a-45e8-9722-0666be97569a-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "0c66706d-8e2a-45e8-9722-0666be97569a" (UID: "0c66706d-8e2a-45e8-9722-0666be97569a"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.915435 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c66706d-8e2a-45e8-9722-0666be97569a-kube-api-access-2vtsq" (OuterVolumeSpecName: "kube-api-access-2vtsq") pod "0c66706d-8e2a-45e8-9722-0666be97569a" (UID: "0c66706d-8e2a-45e8-9722-0666be97569a"). InnerVolumeSpecName "kube-api-access-2vtsq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.957475 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c66706d-8e2a-45e8-9722-0666be97569a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0c66706d-8e2a-45e8-9722-0666be97569a" (UID: "0c66706d-8e2a-45e8-9722-0666be97569a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:32 crc kubenswrapper[4791]: I0218 00:57:32.970995 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c66706d-8e2a-45e8-9722-0666be97569a-config-data" (OuterVolumeSpecName: "config-data") pod "0c66706d-8e2a-45e8-9722-0666be97569a" (UID: "0c66706d-8e2a-45e8-9722-0666be97569a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.014831 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2vtsq\" (UniqueName: \"kubernetes.io/projected/0c66706d-8e2a-45e8-9722-0666be97569a-kube-api-access-2vtsq\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.014866 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c66706d-8e2a-45e8-9722-0666be97569a-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.014876 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c66706d-8e2a-45e8-9722-0666be97569a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.014885 4791 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0c66706d-8e2a-45e8-9722-0666be97569a-config-data-custom\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.076857 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="501a1664-2954-479d-8f39-476611bddbde" path="/var/lib/kubelet/pods/501a1664-2954-479d-8f39-476611bddbde/volumes" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.137457 4791 generic.go:334] "Generic (PLEG): container finished" podID="0c66706d-8e2a-45e8-9722-0666be97569a" containerID="ee84c04900e1ef55b00c71be03d9a7a29491b84b8de95bc4cf8beedc65d8385e" exitCode=0 Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.137525 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-54d9c7b78-mfjzv" event={"ID":"0c66706d-8e2a-45e8-9722-0666be97569a","Type":"ContainerDied","Data":"ee84c04900e1ef55b00c71be03d9a7a29491b84b8de95bc4cf8beedc65d8385e"} Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.137555 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-54d9c7b78-mfjzv" event={"ID":"0c66706d-8e2a-45e8-9722-0666be97569a","Type":"ContainerDied","Data":"ed039516256c3885491c747f776c3a9f5b331fef3dfbfc6be53a16b3de7cc183"} Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.137572 4791 scope.go:117] "RemoveContainer" containerID="ee84c04900e1ef55b00c71be03d9a7a29491b84b8de95bc4cf8beedc65d8385e" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.137680 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-54d9c7b78-mfjzv" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.148881 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"6bfc0893-2e4f-43da-b675-15687e8a3436","Type":"ContainerStarted","Data":"a8fa5d6de44a57a397ffed5f1f25b4eff0cbcd1ae64f8647f9b76a2d246b4c70"} Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.160636 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"65d7529c-6130-4f99-9212-d13950d08fd5","Type":"ContainerDied","Data":"561a076ee84652b53ce5a564cba79ea0ff1bb5938aa8cdfee1a8994c6cd72885"} Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.160726 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.164751 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-54d9c7b78-mfjzv"] Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.181100 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-54d9c7b78-mfjzv"] Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.188293 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5784cf869f-82pnf" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.201122 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.209724 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.210075 4791 scope.go:117] "RemoveContainer" containerID="e0194aef92cb0fe6fe78a05f1e4c8d53b03669429bc41c364943c1772c344c42" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.236781 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:57:33 crc kubenswrapper[4791]: E0218 00:57:33.238067 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c66706d-8e2a-45e8-9722-0666be97569a" containerName="barbican-api" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.238091 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c66706d-8e2a-45e8-9722-0666be97569a" containerName="barbican-api" Feb 18 00:57:33 crc kubenswrapper[4791]: E0218 00:57:33.238121 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65d7529c-6130-4f99-9212-d13950d08fd5" containerName="ceilometer-central-agent" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.238128 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="65d7529c-6130-4f99-9212-d13950d08fd5" containerName="ceilometer-central-agent" Feb 18 00:57:33 crc kubenswrapper[4791]: E0218 00:57:33.238148 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c66706d-8e2a-45e8-9722-0666be97569a" containerName="barbican-api-log" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.238156 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c66706d-8e2a-45e8-9722-0666be97569a" containerName="barbican-api-log" Feb 18 00:57:33 crc kubenswrapper[4791]: E0218 00:57:33.239250 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65d7529c-6130-4f99-9212-d13950d08fd5" containerName="ceilometer-notification-agent" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.239268 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="65d7529c-6130-4f99-9212-d13950d08fd5" 
containerName="ceilometer-notification-agent" Feb 18 00:57:33 crc kubenswrapper[4791]: E0218 00:57:33.239294 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65d7529c-6130-4f99-9212-d13950d08fd5" containerName="sg-core" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.239301 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="65d7529c-6130-4f99-9212-d13950d08fd5" containerName="sg-core" Feb 18 00:57:33 crc kubenswrapper[4791]: E0218 00:57:33.239315 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65d7529c-6130-4f99-9212-d13950d08fd5" containerName="proxy-httpd" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.239321 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="65d7529c-6130-4f99-9212-d13950d08fd5" containerName="proxy-httpd" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.239863 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="65d7529c-6130-4f99-9212-d13950d08fd5" containerName="ceilometer-central-agent" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.239900 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="65d7529c-6130-4f99-9212-d13950d08fd5" containerName="ceilometer-notification-agent" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.239919 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="65d7529c-6130-4f99-9212-d13950d08fd5" containerName="proxy-httpd" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.239939 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="65d7529c-6130-4f99-9212-d13950d08fd5" containerName="sg-core" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.239960 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c66706d-8e2a-45e8-9722-0666be97569a" containerName="barbican-api-log" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.239974 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c66706d-8e2a-45e8-9722-0666be97569a" containerName="barbican-api" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.249806 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.252296 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.253718 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.270377 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.285668 4791 scope.go:117] "RemoveContainer" containerID="ee84c04900e1ef55b00c71be03d9a7a29491b84b8de95bc4cf8beedc65d8385e" Feb 18 00:57:33 crc kubenswrapper[4791]: E0218 00:57:33.293055 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ee84c04900e1ef55b00c71be03d9a7a29491b84b8de95bc4cf8beedc65d8385e\": container with ID starting with ee84c04900e1ef55b00c71be03d9a7a29491b84b8de95bc4cf8beedc65d8385e not found: ID does not exist" containerID="ee84c04900e1ef55b00c71be03d9a7a29491b84b8de95bc4cf8beedc65d8385e" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.293135 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee84c04900e1ef55b00c71be03d9a7a29491b84b8de95bc4cf8beedc65d8385e"} err="failed to get container status \"ee84c04900e1ef55b00c71be03d9a7a29491b84b8de95bc4cf8beedc65d8385e\": rpc error: code = NotFound desc = could not find container \"ee84c04900e1ef55b00c71be03d9a7a29491b84b8de95bc4cf8beedc65d8385e\": container with ID starting with ee84c04900e1ef55b00c71be03d9a7a29491b84b8de95bc4cf8beedc65d8385e not found: ID does not exist" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.293173 4791 scope.go:117] "RemoveContainer" containerID="e0194aef92cb0fe6fe78a05f1e4c8d53b03669429bc41c364943c1772c344c42" Feb 18 00:57:33 crc kubenswrapper[4791]: E0218 00:57:33.295922 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e0194aef92cb0fe6fe78a05f1e4c8d53b03669429bc41c364943c1772c344c42\": container with ID starting with e0194aef92cb0fe6fe78a05f1e4c8d53b03669429bc41c364943c1772c344c42 not found: ID does not exist" containerID="e0194aef92cb0fe6fe78a05f1e4c8d53b03669429bc41c364943c1772c344c42" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.295969 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e0194aef92cb0fe6fe78a05f1e4c8d53b03669429bc41c364943c1772c344c42"} err="failed to get container status \"e0194aef92cb0fe6fe78a05f1e4c8d53b03669429bc41c364943c1772c344c42\": rpc error: code = NotFound desc = could not find container \"e0194aef92cb0fe6fe78a05f1e4c8d53b03669429bc41c364943c1772c344c42\": container with ID starting with e0194aef92cb0fe6fe78a05f1e4c8d53b03669429bc41c364943c1772c344c42 not found: ID does not exist" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.295998 4791 scope.go:117] "RemoveContainer" containerID="07b4df9629c9caaeaed7b8853878691a1ef4bbde72fc1ed90c57c7fa19b1942b" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.296939 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.334030 4791 scope.go:117] "RemoveContainer" containerID="aa476e5d026dcf8aac531d64b46fe9baa0c3e19528ba4c0aaf44c74767c825ee" Feb 18 00:57:33 
crc kubenswrapper[4791]: I0218 00:57:33.350446 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-cn78k"] Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.350679 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-75c8ddd69c-cn78k" podUID="e2aa9956-23e9-4002-befe-79a9244d2149" containerName="dnsmasq-dns" containerID="cri-o://0c0bb29a8673ee9cb1a702ed6ce7bcebcf8926c9b64eec4852f39bd650a0a95f" gracePeriod=10 Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.353988 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6e095560-3137-46e5-bd5e-f03eee61fdb6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6e095560-3137-46e5-bd5e-f03eee61fdb6\") " pod="openstack/ceilometer-0" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.354031 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e095560-3137-46e5-bd5e-f03eee61fdb6-config-data\") pod \"ceilometer-0\" (UID: \"6e095560-3137-46e5-bd5e-f03eee61fdb6\") " pod="openstack/ceilometer-0" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.354186 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6e095560-3137-46e5-bd5e-f03eee61fdb6-scripts\") pod \"ceilometer-0\" (UID: \"6e095560-3137-46e5-bd5e-f03eee61fdb6\") " pod="openstack/ceilometer-0" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.354313 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e095560-3137-46e5-bd5e-f03eee61fdb6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6e095560-3137-46e5-bd5e-f03eee61fdb6\") " pod="openstack/ceilometer-0" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.354665 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h466x\" (UniqueName: \"kubernetes.io/projected/6e095560-3137-46e5-bd5e-f03eee61fdb6-kube-api-access-h466x\") pod \"ceilometer-0\" (UID: \"6e095560-3137-46e5-bd5e-f03eee61fdb6\") " pod="openstack/ceilometer-0" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.354944 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6e095560-3137-46e5-bd5e-f03eee61fdb6-run-httpd\") pod \"ceilometer-0\" (UID: \"6e095560-3137-46e5-bd5e-f03eee61fdb6\") " pod="openstack/ceilometer-0" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.354963 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6e095560-3137-46e5-bd5e-f03eee61fdb6-log-httpd\") pod \"ceilometer-0\" (UID: \"6e095560-3137-46e5-bd5e-f03eee61fdb6\") " pod="openstack/ceilometer-0" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.369566 4791 scope.go:117] "RemoveContainer" containerID="cc15b53040307392520c6bf0afceeeae3fa7b4ede8c4b865650ef1bade4c4e37" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.422053 4791 scope.go:117] "RemoveContainer" containerID="c10fa45ca056ac80520502873bc5c6375a6d6047634d7f5200e31cc5fd512f48" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.456646 4791 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6e095560-3137-46e5-bd5e-f03eee61fdb6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6e095560-3137-46e5-bd5e-f03eee61fdb6\") " pod="openstack/ceilometer-0" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.456686 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e095560-3137-46e5-bd5e-f03eee61fdb6-config-data\") pod \"ceilometer-0\" (UID: \"6e095560-3137-46e5-bd5e-f03eee61fdb6\") " pod="openstack/ceilometer-0" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.456774 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6e095560-3137-46e5-bd5e-f03eee61fdb6-scripts\") pod \"ceilometer-0\" (UID: \"6e095560-3137-46e5-bd5e-f03eee61fdb6\") " pod="openstack/ceilometer-0" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.456834 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e095560-3137-46e5-bd5e-f03eee61fdb6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6e095560-3137-46e5-bd5e-f03eee61fdb6\") " pod="openstack/ceilometer-0" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.456903 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h466x\" (UniqueName: \"kubernetes.io/projected/6e095560-3137-46e5-bd5e-f03eee61fdb6-kube-api-access-h466x\") pod \"ceilometer-0\" (UID: \"6e095560-3137-46e5-bd5e-f03eee61fdb6\") " pod="openstack/ceilometer-0" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.457017 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6e095560-3137-46e5-bd5e-f03eee61fdb6-log-httpd\") pod \"ceilometer-0\" (UID: \"6e095560-3137-46e5-bd5e-f03eee61fdb6\") " pod="openstack/ceilometer-0" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.457032 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6e095560-3137-46e5-bd5e-f03eee61fdb6-run-httpd\") pod \"ceilometer-0\" (UID: \"6e095560-3137-46e5-bd5e-f03eee61fdb6\") " pod="openstack/ceilometer-0" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.458459 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6e095560-3137-46e5-bd5e-f03eee61fdb6-run-httpd\") pod \"ceilometer-0\" (UID: \"6e095560-3137-46e5-bd5e-f03eee61fdb6\") " pod="openstack/ceilometer-0" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.458689 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6e095560-3137-46e5-bd5e-f03eee61fdb6-log-httpd\") pod \"ceilometer-0\" (UID: \"6e095560-3137-46e5-bd5e-f03eee61fdb6\") " pod="openstack/ceilometer-0" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.460355 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6e095560-3137-46e5-bd5e-f03eee61fdb6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6e095560-3137-46e5-bd5e-f03eee61fdb6\") " pod="openstack/ceilometer-0" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.460887 4791 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e095560-3137-46e5-bd5e-f03eee61fdb6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6e095560-3137-46e5-bd5e-f03eee61fdb6\") " pod="openstack/ceilometer-0" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.461279 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6e095560-3137-46e5-bd5e-f03eee61fdb6-scripts\") pod \"ceilometer-0\" (UID: \"6e095560-3137-46e5-bd5e-f03eee61fdb6\") " pod="openstack/ceilometer-0" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.464529 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e095560-3137-46e5-bd5e-f03eee61fdb6-config-data\") pod \"ceilometer-0\" (UID: \"6e095560-3137-46e5-bd5e-f03eee61fdb6\") " pod="openstack/ceilometer-0" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.483289 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h466x\" (UniqueName: \"kubernetes.io/projected/6e095560-3137-46e5-bd5e-f03eee61fdb6-kube-api-access-h466x\") pod \"ceilometer-0\" (UID: \"6e095560-3137-46e5-bd5e-f03eee61fdb6\") " pod="openstack/ceilometer-0" Feb 18 00:57:33 crc kubenswrapper[4791]: I0218 00:57:33.580680 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 18 00:57:34 crc kubenswrapper[4791]: I0218 00:57:34.001580 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75c8ddd69c-cn78k" Feb 18 00:57:34 crc kubenswrapper[4791]: I0218 00:57:34.074128 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e2aa9956-23e9-4002-befe-79a9244d2149-dns-swift-storage-0\") pod \"e2aa9956-23e9-4002-befe-79a9244d2149\" (UID: \"e2aa9956-23e9-4002-befe-79a9244d2149\") " Feb 18 00:57:34 crc kubenswrapper[4791]: I0218 00:57:34.074279 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e2aa9956-23e9-4002-befe-79a9244d2149-ovsdbserver-sb\") pod \"e2aa9956-23e9-4002-befe-79a9244d2149\" (UID: \"e2aa9956-23e9-4002-befe-79a9244d2149\") " Feb 18 00:57:34 crc kubenswrapper[4791]: I0218 00:57:34.074342 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e2aa9956-23e9-4002-befe-79a9244d2149-ovsdbserver-nb\") pod \"e2aa9956-23e9-4002-befe-79a9244d2149\" (UID: \"e2aa9956-23e9-4002-befe-79a9244d2149\") " Feb 18 00:57:34 crc kubenswrapper[4791]: I0218 00:57:34.074477 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e2aa9956-23e9-4002-befe-79a9244d2149-dns-svc\") pod \"e2aa9956-23e9-4002-befe-79a9244d2149\" (UID: \"e2aa9956-23e9-4002-befe-79a9244d2149\") " Feb 18 00:57:34 crc kubenswrapper[4791]: I0218 00:57:34.074534 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9928q\" (UniqueName: \"kubernetes.io/projected/e2aa9956-23e9-4002-befe-79a9244d2149-kube-api-access-9928q\") pod \"e2aa9956-23e9-4002-befe-79a9244d2149\" (UID: \"e2aa9956-23e9-4002-befe-79a9244d2149\") " Feb 18 00:57:34 crc kubenswrapper[4791]: I0218 00:57:34.074564 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"config\" (UniqueName: \"kubernetes.io/configmap/e2aa9956-23e9-4002-befe-79a9244d2149-config\") pod \"e2aa9956-23e9-4002-befe-79a9244d2149\" (UID: \"e2aa9956-23e9-4002-befe-79a9244d2149\") " Feb 18 00:57:34 crc kubenswrapper[4791]: I0218 00:57:34.077305 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:57:34 crc kubenswrapper[4791]: I0218 00:57:34.099370 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e2aa9956-23e9-4002-befe-79a9244d2149-kube-api-access-9928q" (OuterVolumeSpecName: "kube-api-access-9928q") pod "e2aa9956-23e9-4002-befe-79a9244d2149" (UID: "e2aa9956-23e9-4002-befe-79a9244d2149"). InnerVolumeSpecName "kube-api-access-9928q". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:57:34 crc kubenswrapper[4791]: I0218 00:57:34.144582 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e2aa9956-23e9-4002-befe-79a9244d2149-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e2aa9956-23e9-4002-befe-79a9244d2149" (UID: "e2aa9956-23e9-4002-befe-79a9244d2149"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:57:34 crc kubenswrapper[4791]: I0218 00:57:34.145655 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e2aa9956-23e9-4002-befe-79a9244d2149-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "e2aa9956-23e9-4002-befe-79a9244d2149" (UID: "e2aa9956-23e9-4002-befe-79a9244d2149"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:57:34 crc kubenswrapper[4791]: I0218 00:57:34.156540 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e2aa9956-23e9-4002-befe-79a9244d2149-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e2aa9956-23e9-4002-befe-79a9244d2149" (UID: "e2aa9956-23e9-4002-befe-79a9244d2149"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:57:34 crc kubenswrapper[4791]: I0218 00:57:34.158100 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e2aa9956-23e9-4002-befe-79a9244d2149-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e2aa9956-23e9-4002-befe-79a9244d2149" (UID: "e2aa9956-23e9-4002-befe-79a9244d2149"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:57:34 crc kubenswrapper[4791]: I0218 00:57:34.163110 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e2aa9956-23e9-4002-befe-79a9244d2149-config" (OuterVolumeSpecName: "config") pod "e2aa9956-23e9-4002-befe-79a9244d2149" (UID: "e2aa9956-23e9-4002-befe-79a9244d2149"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:57:34 crc kubenswrapper[4791]: I0218 00:57:34.179089 4791 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e2aa9956-23e9-4002-befe-79a9244d2149-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:34 crc kubenswrapper[4791]: I0218 00:57:34.181386 4791 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e2aa9956-23e9-4002-befe-79a9244d2149-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:34 crc kubenswrapper[4791]: I0218 00:57:34.181407 4791 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e2aa9956-23e9-4002-befe-79a9244d2149-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:34 crc kubenswrapper[4791]: I0218 00:57:34.181418 4791 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e2aa9956-23e9-4002-befe-79a9244d2149-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:34 crc kubenswrapper[4791]: I0218 00:57:34.181438 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9928q\" (UniqueName: \"kubernetes.io/projected/e2aa9956-23e9-4002-befe-79a9244d2149-kube-api-access-9928q\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:34 crc kubenswrapper[4791]: I0218 00:57:34.181448 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e2aa9956-23e9-4002-befe-79a9244d2149-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:34 crc kubenswrapper[4791]: I0218 00:57:34.188984 4791 generic.go:334] "Generic (PLEG): container finished" podID="e2aa9956-23e9-4002-befe-79a9244d2149" containerID="0c0bb29a8673ee9cb1a702ed6ce7bcebcf8926c9b64eec4852f39bd650a0a95f" exitCode=0 Feb 18 00:57:34 crc kubenswrapper[4791]: I0218 00:57:34.189061 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75c8ddd69c-cn78k" event={"ID":"e2aa9956-23e9-4002-befe-79a9244d2149","Type":"ContainerDied","Data":"0c0bb29a8673ee9cb1a702ed6ce7bcebcf8926c9b64eec4852f39bd650a0a95f"} Feb 18 00:57:34 crc kubenswrapper[4791]: I0218 00:57:34.189099 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75c8ddd69c-cn78k" event={"ID":"e2aa9956-23e9-4002-befe-79a9244d2149","Type":"ContainerDied","Data":"da041084c2e1634edbea31f361e823339fb29a6ab250913fc9bb7d0695d0d44e"} Feb 18 00:57:34 crc kubenswrapper[4791]: I0218 00:57:34.189121 4791 scope.go:117] "RemoveContainer" containerID="0c0bb29a8673ee9cb1a702ed6ce7bcebcf8926c9b64eec4852f39bd650a0a95f" Feb 18 00:57:34 crc kubenswrapper[4791]: I0218 00:57:34.189336 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-75c8ddd69c-cn78k" Feb 18 00:57:34 crc kubenswrapper[4791]: I0218 00:57:34.195745 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"6bfc0893-2e4f-43da-b675-15687e8a3436","Type":"ContainerStarted","Data":"5350d5192df835db31fd2a914fc4fb90b7b176772e9d4986ddf1415582c34b53"} Feb 18 00:57:34 crc kubenswrapper[4791]: I0218 00:57:34.195802 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"6bfc0893-2e4f-43da-b675-15687e8a3436","Type":"ContainerStarted","Data":"a3cb25d92a40bfdd7786e3133075449fdad3a9ca69c3d442bfa47735eb63bf79"} Feb 18 00:57:34 crc kubenswrapper[4791]: I0218 00:57:34.195915 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Feb 18 00:57:34 crc kubenswrapper[4791]: I0218 00:57:34.197378 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6e095560-3137-46e5-bd5e-f03eee61fdb6","Type":"ContainerStarted","Data":"e32538aced1a1f9061c40fb59b36f4ed2dba77af83aa13dccbf36fcfb76bb1cd"} Feb 18 00:57:34 crc kubenswrapper[4791]: I0218 00:57:34.218278 4791 scope.go:117] "RemoveContainer" containerID="634dfe8dfa7637f01ca3c7bd2f831e5f7fbd7a4eca79dbc4e59eda300337eb0c" Feb 18 00:57:34 crc kubenswrapper[4791]: I0218 00:57:34.237267 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.237233988 podStartE2EDuration="3.237233988s" podCreationTimestamp="2026-02-18 00:57:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:57:34.22631425 +0000 UTC m=+1395.794327420" watchObservedRunningTime="2026-02-18 00:57:34.237233988 +0000 UTC m=+1395.805247158" Feb 18 00:57:34 crc kubenswrapper[4791]: I0218 00:57:34.254322 4791 scope.go:117] "RemoveContainer" containerID="0c0bb29a8673ee9cb1a702ed6ce7bcebcf8926c9b64eec4852f39bd650a0a95f" Feb 18 00:57:34 crc kubenswrapper[4791]: E0218 00:57:34.254804 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0c0bb29a8673ee9cb1a702ed6ce7bcebcf8926c9b64eec4852f39bd650a0a95f\": container with ID starting with 0c0bb29a8673ee9cb1a702ed6ce7bcebcf8926c9b64eec4852f39bd650a0a95f not found: ID does not exist" containerID="0c0bb29a8673ee9cb1a702ed6ce7bcebcf8926c9b64eec4852f39bd650a0a95f" Feb 18 00:57:34 crc kubenswrapper[4791]: I0218 00:57:34.254859 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c0bb29a8673ee9cb1a702ed6ce7bcebcf8926c9b64eec4852f39bd650a0a95f"} err="failed to get container status \"0c0bb29a8673ee9cb1a702ed6ce7bcebcf8926c9b64eec4852f39bd650a0a95f\": rpc error: code = NotFound desc = could not find container \"0c0bb29a8673ee9cb1a702ed6ce7bcebcf8926c9b64eec4852f39bd650a0a95f\": container with ID starting with 0c0bb29a8673ee9cb1a702ed6ce7bcebcf8926c9b64eec4852f39bd650a0a95f not found: ID does not exist" Feb 18 00:57:34 crc kubenswrapper[4791]: I0218 00:57:34.254889 4791 scope.go:117] "RemoveContainer" containerID="634dfe8dfa7637f01ca3c7bd2f831e5f7fbd7a4eca79dbc4e59eda300337eb0c" Feb 18 00:57:34 crc kubenswrapper[4791]: E0218 00:57:34.255237 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"634dfe8dfa7637f01ca3c7bd2f831e5f7fbd7a4eca79dbc4e59eda300337eb0c\": container with ID 
starting with 634dfe8dfa7637f01ca3c7bd2f831e5f7fbd7a4eca79dbc4e59eda300337eb0c not found: ID does not exist" containerID="634dfe8dfa7637f01ca3c7bd2f831e5f7fbd7a4eca79dbc4e59eda300337eb0c" Feb 18 00:57:34 crc kubenswrapper[4791]: I0218 00:57:34.255269 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"634dfe8dfa7637f01ca3c7bd2f831e5f7fbd7a4eca79dbc4e59eda300337eb0c"} err="failed to get container status \"634dfe8dfa7637f01ca3c7bd2f831e5f7fbd7a4eca79dbc4e59eda300337eb0c\": rpc error: code = NotFound desc = could not find container \"634dfe8dfa7637f01ca3c7bd2f831e5f7fbd7a4eca79dbc4e59eda300337eb0c\": container with ID starting with 634dfe8dfa7637f01ca3c7bd2f831e5f7fbd7a4eca79dbc4e59eda300337eb0c not found: ID does not exist" Feb 18 00:57:34 crc kubenswrapper[4791]: I0218 00:57:34.260952 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-cn78k"] Feb 18 00:57:34 crc kubenswrapper[4791]: I0218 00:57:34.275838 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-cn78k"] Feb 18 00:57:35 crc kubenswrapper[4791]: I0218 00:57:35.078377 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0c66706d-8e2a-45e8-9722-0666be97569a" path="/var/lib/kubelet/pods/0c66706d-8e2a-45e8-9722-0666be97569a/volumes" Feb 18 00:57:35 crc kubenswrapper[4791]: I0218 00:57:35.079931 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="65d7529c-6130-4f99-9212-d13950d08fd5" path="/var/lib/kubelet/pods/65d7529c-6130-4f99-9212-d13950d08fd5/volumes" Feb 18 00:57:35 crc kubenswrapper[4791]: I0218 00:57:35.080708 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e2aa9956-23e9-4002-befe-79a9244d2149" path="/var/lib/kubelet/pods/e2aa9956-23e9-4002-befe-79a9244d2149/volumes" Feb 18 00:57:35 crc kubenswrapper[4791]: I0218 00:57:35.220957 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6e095560-3137-46e5-bd5e-f03eee61fdb6","Type":"ContainerStarted","Data":"4465be71ab647b33970d4a204efcf7a436c78286539bc074a504e1389aa3d92f"} Feb 18 00:57:36 crc kubenswrapper[4791]: I0218 00:57:36.235385 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6e095560-3137-46e5-bd5e-f03eee61fdb6","Type":"ContainerStarted","Data":"216248e56f84af6b908e0abad59dce5fb324f189ff519cc6095e06d70d532305"} Feb 18 00:57:36 crc kubenswrapper[4791]: I0218 00:57:36.235750 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6e095560-3137-46e5-bd5e-f03eee61fdb6","Type":"ContainerStarted","Data":"1d083905565b7442c7ab2e96d29ccc4ab05dc8aa982cac0277293ad347ae5d6d"} Feb 18 00:57:36 crc kubenswrapper[4791]: I0218 00:57:36.742322 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-956c96f98-prlnm" Feb 18 00:57:36 crc kubenswrapper[4791]: I0218 00:57:36.754641 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-956c96f98-prlnm" Feb 18 00:57:37 crc kubenswrapper[4791]: I0218 00:57:37.023578 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-568cb6f944-cxt84"] Feb 18 00:57:37 crc kubenswrapper[4791]: E0218 00:57:37.024136 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2aa9956-23e9-4002-befe-79a9244d2149" containerName="init" Feb 18 00:57:37 crc kubenswrapper[4791]: I0218 00:57:37.024154 4791 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="e2aa9956-23e9-4002-befe-79a9244d2149" containerName="init" Feb 18 00:57:37 crc kubenswrapper[4791]: E0218 00:57:37.024201 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2aa9956-23e9-4002-befe-79a9244d2149" containerName="dnsmasq-dns" Feb 18 00:57:37 crc kubenswrapper[4791]: I0218 00:57:37.024208 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2aa9956-23e9-4002-befe-79a9244d2149" containerName="dnsmasq-dns" Feb 18 00:57:37 crc kubenswrapper[4791]: I0218 00:57:37.024408 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2aa9956-23e9-4002-befe-79a9244d2149" containerName="dnsmasq-dns" Feb 18 00:57:37 crc kubenswrapper[4791]: I0218 00:57:37.025654 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-568cb6f944-cxt84" Feb 18 00:57:37 crc kubenswrapper[4791]: I0218 00:57:37.037119 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-568cb6f944-cxt84"] Feb 18 00:57:37 crc kubenswrapper[4791]: I0218 00:57:37.159499 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/58b78dc2-71f8-4009-b3b0-f3db0fee55c4-internal-tls-certs\") pod \"placement-568cb6f944-cxt84\" (UID: \"58b78dc2-71f8-4009-b3b0-f3db0fee55c4\") " pod="openstack/placement-568cb6f944-cxt84" Feb 18 00:57:37 crc kubenswrapper[4791]: I0218 00:57:37.159565 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2rb4l\" (UniqueName: \"kubernetes.io/projected/58b78dc2-71f8-4009-b3b0-f3db0fee55c4-kube-api-access-2rb4l\") pod \"placement-568cb6f944-cxt84\" (UID: \"58b78dc2-71f8-4009-b3b0-f3db0fee55c4\") " pod="openstack/placement-568cb6f944-cxt84" Feb 18 00:57:37 crc kubenswrapper[4791]: I0218 00:57:37.159630 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/58b78dc2-71f8-4009-b3b0-f3db0fee55c4-logs\") pod \"placement-568cb6f944-cxt84\" (UID: \"58b78dc2-71f8-4009-b3b0-f3db0fee55c4\") " pod="openstack/placement-568cb6f944-cxt84" Feb 18 00:57:37 crc kubenswrapper[4791]: I0218 00:57:37.159662 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58b78dc2-71f8-4009-b3b0-f3db0fee55c4-config-data\") pod \"placement-568cb6f944-cxt84\" (UID: \"58b78dc2-71f8-4009-b3b0-f3db0fee55c4\") " pod="openstack/placement-568cb6f944-cxt84" Feb 18 00:57:37 crc kubenswrapper[4791]: I0218 00:57:37.159697 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/58b78dc2-71f8-4009-b3b0-f3db0fee55c4-scripts\") pod \"placement-568cb6f944-cxt84\" (UID: \"58b78dc2-71f8-4009-b3b0-f3db0fee55c4\") " pod="openstack/placement-568cb6f944-cxt84" Feb 18 00:57:37 crc kubenswrapper[4791]: I0218 00:57:37.159726 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/58b78dc2-71f8-4009-b3b0-f3db0fee55c4-public-tls-certs\") pod \"placement-568cb6f944-cxt84\" (UID: \"58b78dc2-71f8-4009-b3b0-f3db0fee55c4\") " pod="openstack/placement-568cb6f944-cxt84" Feb 18 00:57:37 crc kubenswrapper[4791]: I0218 00:57:37.159800 4791 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58b78dc2-71f8-4009-b3b0-f3db0fee55c4-combined-ca-bundle\") pod \"placement-568cb6f944-cxt84\" (UID: \"58b78dc2-71f8-4009-b3b0-f3db0fee55c4\") " pod="openstack/placement-568cb6f944-cxt84" Feb 18 00:57:37 crc kubenswrapper[4791]: I0218 00:57:37.262182 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58b78dc2-71f8-4009-b3b0-f3db0fee55c4-combined-ca-bundle\") pod \"placement-568cb6f944-cxt84\" (UID: \"58b78dc2-71f8-4009-b3b0-f3db0fee55c4\") " pod="openstack/placement-568cb6f944-cxt84" Feb 18 00:57:37 crc kubenswrapper[4791]: I0218 00:57:37.262274 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/58b78dc2-71f8-4009-b3b0-f3db0fee55c4-internal-tls-certs\") pod \"placement-568cb6f944-cxt84\" (UID: \"58b78dc2-71f8-4009-b3b0-f3db0fee55c4\") " pod="openstack/placement-568cb6f944-cxt84" Feb 18 00:57:37 crc kubenswrapper[4791]: I0218 00:57:37.262322 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2rb4l\" (UniqueName: \"kubernetes.io/projected/58b78dc2-71f8-4009-b3b0-f3db0fee55c4-kube-api-access-2rb4l\") pod \"placement-568cb6f944-cxt84\" (UID: \"58b78dc2-71f8-4009-b3b0-f3db0fee55c4\") " pod="openstack/placement-568cb6f944-cxt84" Feb 18 00:57:37 crc kubenswrapper[4791]: I0218 00:57:37.262398 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/58b78dc2-71f8-4009-b3b0-f3db0fee55c4-logs\") pod \"placement-568cb6f944-cxt84\" (UID: \"58b78dc2-71f8-4009-b3b0-f3db0fee55c4\") " pod="openstack/placement-568cb6f944-cxt84" Feb 18 00:57:37 crc kubenswrapper[4791]: I0218 00:57:37.262435 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58b78dc2-71f8-4009-b3b0-f3db0fee55c4-config-data\") pod \"placement-568cb6f944-cxt84\" (UID: \"58b78dc2-71f8-4009-b3b0-f3db0fee55c4\") " pod="openstack/placement-568cb6f944-cxt84" Feb 18 00:57:37 crc kubenswrapper[4791]: I0218 00:57:37.262482 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/58b78dc2-71f8-4009-b3b0-f3db0fee55c4-scripts\") pod \"placement-568cb6f944-cxt84\" (UID: \"58b78dc2-71f8-4009-b3b0-f3db0fee55c4\") " pod="openstack/placement-568cb6f944-cxt84" Feb 18 00:57:37 crc kubenswrapper[4791]: I0218 00:57:37.262520 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/58b78dc2-71f8-4009-b3b0-f3db0fee55c4-public-tls-certs\") pod \"placement-568cb6f944-cxt84\" (UID: \"58b78dc2-71f8-4009-b3b0-f3db0fee55c4\") " pod="openstack/placement-568cb6f944-cxt84" Feb 18 00:57:37 crc kubenswrapper[4791]: I0218 00:57:37.263206 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/58b78dc2-71f8-4009-b3b0-f3db0fee55c4-logs\") pod \"placement-568cb6f944-cxt84\" (UID: \"58b78dc2-71f8-4009-b3b0-f3db0fee55c4\") " pod="openstack/placement-568cb6f944-cxt84" Feb 18 00:57:37 crc kubenswrapper[4791]: I0218 00:57:37.267047 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/58b78dc2-71f8-4009-b3b0-f3db0fee55c4-scripts\") pod \"placement-568cb6f944-cxt84\" (UID: \"58b78dc2-71f8-4009-b3b0-f3db0fee55c4\") " pod="openstack/placement-568cb6f944-cxt84" Feb 18 00:57:37 crc kubenswrapper[4791]: I0218 00:57:37.267709 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58b78dc2-71f8-4009-b3b0-f3db0fee55c4-combined-ca-bundle\") pod \"placement-568cb6f944-cxt84\" (UID: \"58b78dc2-71f8-4009-b3b0-f3db0fee55c4\") " pod="openstack/placement-568cb6f944-cxt84" Feb 18 00:57:37 crc kubenswrapper[4791]: I0218 00:57:37.267793 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58b78dc2-71f8-4009-b3b0-f3db0fee55c4-config-data\") pod \"placement-568cb6f944-cxt84\" (UID: \"58b78dc2-71f8-4009-b3b0-f3db0fee55c4\") " pod="openstack/placement-568cb6f944-cxt84" Feb 18 00:57:37 crc kubenswrapper[4791]: I0218 00:57:37.272756 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/58b78dc2-71f8-4009-b3b0-f3db0fee55c4-internal-tls-certs\") pod \"placement-568cb6f944-cxt84\" (UID: \"58b78dc2-71f8-4009-b3b0-f3db0fee55c4\") " pod="openstack/placement-568cb6f944-cxt84" Feb 18 00:57:37 crc kubenswrapper[4791]: I0218 00:57:37.273214 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/58b78dc2-71f8-4009-b3b0-f3db0fee55c4-public-tls-certs\") pod \"placement-568cb6f944-cxt84\" (UID: \"58b78dc2-71f8-4009-b3b0-f3db0fee55c4\") " pod="openstack/placement-568cb6f944-cxt84" Feb 18 00:57:37 crc kubenswrapper[4791]: I0218 00:57:37.280085 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2rb4l\" (UniqueName: \"kubernetes.io/projected/58b78dc2-71f8-4009-b3b0-f3db0fee55c4-kube-api-access-2rb4l\") pod \"placement-568cb6f944-cxt84\" (UID: \"58b78dc2-71f8-4009-b3b0-f3db0fee55c4\") " pod="openstack/placement-568cb6f944-cxt84" Feb 18 00:57:37 crc kubenswrapper[4791]: I0218 00:57:37.348992 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-568cb6f944-cxt84" Feb 18 00:57:37 crc kubenswrapper[4791]: I0218 00:57:37.861927 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-568cb6f944-cxt84"] Feb 18 00:57:37 crc kubenswrapper[4791]: W0218 00:57:37.871386 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod58b78dc2_71f8_4009_b3b0_f3db0fee55c4.slice/crio-173278293be9ccc99b3588a8467f9a6160c6a96893373639b7e0af4562b276e3 WatchSource:0}: Error finding container 173278293be9ccc99b3588a8467f9a6160c6a96893373639b7e0af4562b276e3: Status 404 returned error can't find the container with id 173278293be9ccc99b3588a8467f9a6160c6a96893373639b7e0af4562b276e3 Feb 18 00:57:38 crc kubenswrapper[4791]: I0218 00:57:38.257539 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-568cb6f944-cxt84" event={"ID":"58b78dc2-71f8-4009-b3b0-f3db0fee55c4","Type":"ContainerStarted","Data":"e13c7c182e39abafa6be2cd5a0f90f05bade75713320e8ca53c8e00480062388"} Feb 18 00:57:38 crc kubenswrapper[4791]: I0218 00:57:38.257597 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-568cb6f944-cxt84" event={"ID":"58b78dc2-71f8-4009-b3b0-f3db0fee55c4","Type":"ContainerStarted","Data":"ba08c5344aa6f96c1352e41f194833199be09598a161192d80229a8123eed980"} Feb 18 00:57:38 crc kubenswrapper[4791]: I0218 00:57:38.257609 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-568cb6f944-cxt84" event={"ID":"58b78dc2-71f8-4009-b3b0-f3db0fee55c4","Type":"ContainerStarted","Data":"173278293be9ccc99b3588a8467f9a6160c6a96893373639b7e0af4562b276e3"} Feb 18 00:57:38 crc kubenswrapper[4791]: I0218 00:57:38.257708 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-568cb6f944-cxt84" Feb 18 00:57:38 crc kubenswrapper[4791]: I0218 00:57:38.263749 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6e095560-3137-46e5-bd5e-f03eee61fdb6","Type":"ContainerStarted","Data":"676b2f0e14dc541f44cb23a5ea59f2519f4bbfe7d8ec509d3834ad5d0b5e3e82"} Feb 18 00:57:38 crc kubenswrapper[4791]: I0218 00:57:38.263979 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Feb 18 00:57:38 crc kubenswrapper[4791]: I0218 00:57:38.285305 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-568cb6f944-cxt84" podStartSLOduration=2.2852853140000002 podStartE2EDuration="2.285285314s" podCreationTimestamp="2026-02-18 00:57:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:57:38.279341 +0000 UTC m=+1399.847354180" watchObservedRunningTime="2026-02-18 00:57:38.285285314 +0000 UTC m=+1399.853298484" Feb 18 00:57:38 crc kubenswrapper[4791]: I0218 00:57:38.305992 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.761260437 podStartE2EDuration="5.305964955s" podCreationTimestamp="2026-02-18 00:57:33 +0000 UTC" firstStartedPulling="2026-02-18 00:57:34.091878334 +0000 UTC m=+1395.659891504" lastFinishedPulling="2026-02-18 00:57:37.636582862 +0000 UTC m=+1399.204596022" observedRunningTime="2026-02-18 00:57:38.30130142 +0000 UTC m=+1399.869314590" watchObservedRunningTime="2026-02-18 00:57:38.305964955 +0000 UTC m=+1399.873978125" Feb 18 00:57:38 crc 
kubenswrapper[4791]: I0218 00:57:38.480490 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Feb 18 00:57:38 crc kubenswrapper[4791]: I0218 00:57:38.532341 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Feb 18 00:57:39 crc kubenswrapper[4791]: I0218 00:57:39.274033 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="05cc6249-6250-478f-a6a3-226ebea58d94" containerName="cinder-scheduler" containerID="cri-o://723ec48d668b776326a6bcc75a1f688843088515e04ac2935a772006e7a8f3c3" gracePeriod=30 Feb 18 00:57:39 crc kubenswrapper[4791]: I0218 00:57:39.274643 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="05cc6249-6250-478f-a6a3-226ebea58d94" containerName="probe" containerID="cri-o://9cb5f64bcd6c0bdff7e65174a088eaa43b6a5af87316b80273232a0278579ed6" gracePeriod=30 Feb 18 00:57:39 crc kubenswrapper[4791]: I0218 00:57:39.274828 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-568cb6f944-cxt84" Feb 18 00:57:40 crc kubenswrapper[4791]: I0218 00:57:40.299976 4791 generic.go:334] "Generic (PLEG): container finished" podID="05cc6249-6250-478f-a6a3-226ebea58d94" containerID="9cb5f64bcd6c0bdff7e65174a088eaa43b6a5af87316b80273232a0278579ed6" exitCode=0 Feb 18 00:57:40 crc kubenswrapper[4791]: I0218 00:57:40.300066 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"05cc6249-6250-478f-a6a3-226ebea58d94","Type":"ContainerDied","Data":"9cb5f64bcd6c0bdff7e65174a088eaa43b6a5af87316b80273232a0278579ed6"} Feb 18 00:57:41 crc kubenswrapper[4791]: I0218 00:57:41.321228 4791 generic.go:334] "Generic (PLEG): container finished" podID="05cc6249-6250-478f-a6a3-226ebea58d94" containerID="723ec48d668b776326a6bcc75a1f688843088515e04ac2935a772006e7a8f3c3" exitCode=0 Feb 18 00:57:41 crc kubenswrapper[4791]: I0218 00:57:41.321650 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"05cc6249-6250-478f-a6a3-226ebea58d94","Type":"ContainerDied","Data":"723ec48d668b776326a6bcc75a1f688843088515e04ac2935a772006e7a8f3c3"} Feb 18 00:57:41 crc kubenswrapper[4791]: I0218 00:57:41.721612 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Feb 18 00:57:41 crc kubenswrapper[4791]: I0218 00:57:41.871552 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/05cc6249-6250-478f-a6a3-226ebea58d94-etc-machine-id\") pod \"05cc6249-6250-478f-a6a3-226ebea58d94\" (UID: \"05cc6249-6250-478f-a6a3-226ebea58d94\") " Feb 18 00:57:41 crc kubenswrapper[4791]: I0218 00:57:41.871634 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05cc6249-6250-478f-a6a3-226ebea58d94-combined-ca-bundle\") pod \"05cc6249-6250-478f-a6a3-226ebea58d94\" (UID: \"05cc6249-6250-478f-a6a3-226ebea58d94\") " Feb 18 00:57:41 crc kubenswrapper[4791]: I0218 00:57:41.871743 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/05cc6249-6250-478f-a6a3-226ebea58d94-scripts\") pod \"05cc6249-6250-478f-a6a3-226ebea58d94\" (UID: \"05cc6249-6250-478f-a6a3-226ebea58d94\") " Feb 18 00:57:41 crc kubenswrapper[4791]: I0218 00:57:41.871876 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/05cc6249-6250-478f-a6a3-226ebea58d94-config-data-custom\") pod \"05cc6249-6250-478f-a6a3-226ebea58d94\" (UID: \"05cc6249-6250-478f-a6a3-226ebea58d94\") " Feb 18 00:57:41 crc kubenswrapper[4791]: I0218 00:57:41.871944 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05cc6249-6250-478f-a6a3-226ebea58d94-config-data\") pod \"05cc6249-6250-478f-a6a3-226ebea58d94\" (UID: \"05cc6249-6250-478f-a6a3-226ebea58d94\") " Feb 18 00:57:41 crc kubenswrapper[4791]: I0218 00:57:41.872026 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9bc24\" (UniqueName: \"kubernetes.io/projected/05cc6249-6250-478f-a6a3-226ebea58d94-kube-api-access-9bc24\") pod \"05cc6249-6250-478f-a6a3-226ebea58d94\" (UID: \"05cc6249-6250-478f-a6a3-226ebea58d94\") " Feb 18 00:57:41 crc kubenswrapper[4791]: I0218 00:57:41.873886 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/05cc6249-6250-478f-a6a3-226ebea58d94-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "05cc6249-6250-478f-a6a3-226ebea58d94" (UID: "05cc6249-6250-478f-a6a3-226ebea58d94"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 00:57:41 crc kubenswrapper[4791]: I0218 00:57:41.881107 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05cc6249-6250-478f-a6a3-226ebea58d94-kube-api-access-9bc24" (OuterVolumeSpecName: "kube-api-access-9bc24") pod "05cc6249-6250-478f-a6a3-226ebea58d94" (UID: "05cc6249-6250-478f-a6a3-226ebea58d94"). InnerVolumeSpecName "kube-api-access-9bc24". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:57:41 crc kubenswrapper[4791]: I0218 00:57:41.901187 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05cc6249-6250-478f-a6a3-226ebea58d94-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "05cc6249-6250-478f-a6a3-226ebea58d94" (UID: "05cc6249-6250-478f-a6a3-226ebea58d94"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:41 crc kubenswrapper[4791]: I0218 00:57:41.908841 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05cc6249-6250-478f-a6a3-226ebea58d94-scripts" (OuterVolumeSpecName: "scripts") pod "05cc6249-6250-478f-a6a3-226ebea58d94" (UID: "05cc6249-6250-478f-a6a3-226ebea58d94"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:41 crc kubenswrapper[4791]: I0218 00:57:41.952133 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05cc6249-6250-478f-a6a3-226ebea58d94-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "05cc6249-6250-478f-a6a3-226ebea58d94" (UID: "05cc6249-6250-478f-a6a3-226ebea58d94"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:41 crc kubenswrapper[4791]: I0218 00:57:41.975256 4791 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/05cc6249-6250-478f-a6a3-226ebea58d94-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:41 crc kubenswrapper[4791]: I0218 00:57:41.975291 4791 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/05cc6249-6250-478f-a6a3-226ebea58d94-config-data-custom\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:41 crc kubenswrapper[4791]: I0218 00:57:41.975304 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9bc24\" (UniqueName: \"kubernetes.io/projected/05cc6249-6250-478f-a6a3-226ebea58d94-kube-api-access-9bc24\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:41 crc kubenswrapper[4791]: I0218 00:57:41.975313 4791 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/05cc6249-6250-478f-a6a3-226ebea58d94-etc-machine-id\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:41 crc kubenswrapper[4791]: I0218 00:57:41.975323 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05cc6249-6250-478f-a6a3-226ebea58d94-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:42 crc kubenswrapper[4791]: I0218 00:57:42.013113 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05cc6249-6250-478f-a6a3-226ebea58d94-config-data" (OuterVolumeSpecName: "config-data") pod "05cc6249-6250-478f-a6a3-226ebea58d94" (UID: "05cc6249-6250-478f-a6a3-226ebea58d94"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:42 crc kubenswrapper[4791]: I0218 00:57:42.077369 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05cc6249-6250-478f-a6a3-226ebea58d94-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:42 crc kubenswrapper[4791]: I0218 00:57:42.352264 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"05cc6249-6250-478f-a6a3-226ebea58d94","Type":"ContainerDied","Data":"c33058911d5cdd321621338b8acc9da374e4f0a264a065420cc19959d1a5b858"} Feb 18 00:57:42 crc kubenswrapper[4791]: I0218 00:57:42.353396 4791 scope.go:117] "RemoveContainer" containerID="9cb5f64bcd6c0bdff7e65174a088eaa43b6a5af87316b80273232a0278579ed6" Feb 18 00:57:42 crc kubenswrapper[4791]: I0218 00:57:42.352453 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Feb 18 00:57:42 crc kubenswrapper[4791]: I0218 00:57:42.401510 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Feb 18 00:57:42 crc kubenswrapper[4791]: I0218 00:57:42.405699 4791 scope.go:117] "RemoveContainer" containerID="723ec48d668b776326a6bcc75a1f688843088515e04ac2935a772006e7a8f3c3" Feb 18 00:57:42 crc kubenswrapper[4791]: I0218 00:57:42.420546 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Feb 18 00:57:42 crc kubenswrapper[4791]: I0218 00:57:42.431230 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Feb 18 00:57:42 crc kubenswrapper[4791]: E0218 00:57:42.439119 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05cc6249-6250-478f-a6a3-226ebea58d94" containerName="probe" Feb 18 00:57:42 crc kubenswrapper[4791]: I0218 00:57:42.439251 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="05cc6249-6250-478f-a6a3-226ebea58d94" containerName="probe" Feb 18 00:57:42 crc kubenswrapper[4791]: E0218 00:57:42.439305 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05cc6249-6250-478f-a6a3-226ebea58d94" containerName="cinder-scheduler" Feb 18 00:57:42 crc kubenswrapper[4791]: I0218 00:57:42.439317 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="05cc6249-6250-478f-a6a3-226ebea58d94" containerName="cinder-scheduler" Feb 18 00:57:42 crc kubenswrapper[4791]: I0218 00:57:42.440087 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="05cc6249-6250-478f-a6a3-226ebea58d94" containerName="probe" Feb 18 00:57:42 crc kubenswrapper[4791]: I0218 00:57:42.440132 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="05cc6249-6250-478f-a6a3-226ebea58d94" containerName="cinder-scheduler" Feb 18 00:57:42 crc kubenswrapper[4791]: I0218 00:57:42.445344 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Feb 18 00:57:42 crc kubenswrapper[4791]: I0218 00:57:42.450975 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Feb 18 00:57:42 crc kubenswrapper[4791]: I0218 00:57:42.470643 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Feb 18 00:57:42 crc kubenswrapper[4791]: I0218 00:57:42.594290 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sb4x7\" (UniqueName: \"kubernetes.io/projected/5983f269-c1da-4b3d-90dd-083fa90022eb-kube-api-access-sb4x7\") pod \"cinder-scheduler-0\" (UID: \"5983f269-c1da-4b3d-90dd-083fa90022eb\") " pod="openstack/cinder-scheduler-0" Feb 18 00:57:42 crc kubenswrapper[4791]: I0218 00:57:42.594348 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5983f269-c1da-4b3d-90dd-083fa90022eb-scripts\") pod \"cinder-scheduler-0\" (UID: \"5983f269-c1da-4b3d-90dd-083fa90022eb\") " pod="openstack/cinder-scheduler-0" Feb 18 00:57:42 crc kubenswrapper[4791]: I0218 00:57:42.594401 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5983f269-c1da-4b3d-90dd-083fa90022eb-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"5983f269-c1da-4b3d-90dd-083fa90022eb\") " pod="openstack/cinder-scheduler-0" Feb 18 00:57:42 crc kubenswrapper[4791]: I0218 00:57:42.594429 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5983f269-c1da-4b3d-90dd-083fa90022eb-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"5983f269-c1da-4b3d-90dd-083fa90022eb\") " pod="openstack/cinder-scheduler-0" Feb 18 00:57:42 crc kubenswrapper[4791]: I0218 00:57:42.594446 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5983f269-c1da-4b3d-90dd-083fa90022eb-config-data\") pod \"cinder-scheduler-0\" (UID: \"5983f269-c1da-4b3d-90dd-083fa90022eb\") " pod="openstack/cinder-scheduler-0" Feb 18 00:57:42 crc kubenswrapper[4791]: I0218 00:57:42.594881 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5983f269-c1da-4b3d-90dd-083fa90022eb-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"5983f269-c1da-4b3d-90dd-083fa90022eb\") " pod="openstack/cinder-scheduler-0" Feb 18 00:57:42 crc kubenswrapper[4791]: I0218 00:57:42.697480 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5983f269-c1da-4b3d-90dd-083fa90022eb-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"5983f269-c1da-4b3d-90dd-083fa90022eb\") " pod="openstack/cinder-scheduler-0" Feb 18 00:57:42 crc kubenswrapper[4791]: I0218 00:57:42.697535 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5983f269-c1da-4b3d-90dd-083fa90022eb-config-data\") pod \"cinder-scheduler-0\" (UID: \"5983f269-c1da-4b3d-90dd-083fa90022eb\") " pod="openstack/cinder-scheduler-0" Feb 18 00:57:42 crc kubenswrapper[4791]: I0218 00:57:42.697640 4791 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5983f269-c1da-4b3d-90dd-083fa90022eb-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"5983f269-c1da-4b3d-90dd-083fa90022eb\") " pod="openstack/cinder-scheduler-0" Feb 18 00:57:42 crc kubenswrapper[4791]: I0218 00:57:42.697718 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sb4x7\" (UniqueName: \"kubernetes.io/projected/5983f269-c1da-4b3d-90dd-083fa90022eb-kube-api-access-sb4x7\") pod \"cinder-scheduler-0\" (UID: \"5983f269-c1da-4b3d-90dd-083fa90022eb\") " pod="openstack/cinder-scheduler-0" Feb 18 00:57:42 crc kubenswrapper[4791]: I0218 00:57:42.697746 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5983f269-c1da-4b3d-90dd-083fa90022eb-scripts\") pod \"cinder-scheduler-0\" (UID: \"5983f269-c1da-4b3d-90dd-083fa90022eb\") " pod="openstack/cinder-scheduler-0" Feb 18 00:57:42 crc kubenswrapper[4791]: I0218 00:57:42.697789 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5983f269-c1da-4b3d-90dd-083fa90022eb-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"5983f269-c1da-4b3d-90dd-083fa90022eb\") " pod="openstack/cinder-scheduler-0" Feb 18 00:57:42 crc kubenswrapper[4791]: I0218 00:57:42.697877 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5983f269-c1da-4b3d-90dd-083fa90022eb-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"5983f269-c1da-4b3d-90dd-083fa90022eb\") " pod="openstack/cinder-scheduler-0" Feb 18 00:57:42 crc kubenswrapper[4791]: I0218 00:57:42.706465 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5983f269-c1da-4b3d-90dd-083fa90022eb-config-data\") pod \"cinder-scheduler-0\" (UID: \"5983f269-c1da-4b3d-90dd-083fa90022eb\") " pod="openstack/cinder-scheduler-0" Feb 18 00:57:42 crc kubenswrapper[4791]: I0218 00:57:42.706499 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5983f269-c1da-4b3d-90dd-083fa90022eb-scripts\") pod \"cinder-scheduler-0\" (UID: \"5983f269-c1da-4b3d-90dd-083fa90022eb\") " pod="openstack/cinder-scheduler-0" Feb 18 00:57:42 crc kubenswrapper[4791]: I0218 00:57:42.708805 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5983f269-c1da-4b3d-90dd-083fa90022eb-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"5983f269-c1da-4b3d-90dd-083fa90022eb\") " pod="openstack/cinder-scheduler-0" Feb 18 00:57:42 crc kubenswrapper[4791]: I0218 00:57:42.717390 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5983f269-c1da-4b3d-90dd-083fa90022eb-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"5983f269-c1da-4b3d-90dd-083fa90022eb\") " pod="openstack/cinder-scheduler-0" Feb 18 00:57:42 crc kubenswrapper[4791]: I0218 00:57:42.726285 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sb4x7\" (UniqueName: \"kubernetes.io/projected/5983f269-c1da-4b3d-90dd-083fa90022eb-kube-api-access-sb4x7\") pod \"cinder-scheduler-0\" (UID: \"5983f269-c1da-4b3d-90dd-083fa90022eb\") " pod="openstack/cinder-scheduler-0" Feb 18 
00:57:42 crc kubenswrapper[4791]: I0218 00:57:42.776859 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Feb 18 00:57:43 crc kubenswrapper[4791]: I0218 00:57:43.074871 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="05cc6249-6250-478f-a6a3-226ebea58d94" path="/var/lib/kubelet/pods/05cc6249-6250-478f-a6a3-226ebea58d94/volumes" Feb 18 00:57:43 crc kubenswrapper[4791]: I0218 00:57:43.248563 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Feb 18 00:57:43 crc kubenswrapper[4791]: I0218 00:57:43.366935 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"5983f269-c1da-4b3d-90dd-083fa90022eb","Type":"ContainerStarted","Data":"0ddd6b7cee3da7270195001bac65e218bd6251a43d8ef459cd662e7720e72165"} Feb 18 00:57:43 crc kubenswrapper[4791]: I0218 00:57:43.419983 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-85bf744856-fnxxz" Feb 18 00:57:43 crc kubenswrapper[4791]: I0218 00:57:43.938884 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wvm6h"] Feb 18 00:57:43 crc kubenswrapper[4791]: I0218 00:57:43.941453 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wvm6h" Feb 18 00:57:43 crc kubenswrapper[4791]: I0218 00:57:43.959687 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wvm6h"] Feb 18 00:57:44 crc kubenswrapper[4791]: I0218 00:57:44.043381 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ql95m\" (UniqueName: \"kubernetes.io/projected/90ea96e9-ad1e-49ab-b9f9-b60d2142aacd-kube-api-access-ql95m\") pod \"redhat-operators-wvm6h\" (UID: \"90ea96e9-ad1e-49ab-b9f9-b60d2142aacd\") " pod="openshift-marketplace/redhat-operators-wvm6h" Feb 18 00:57:44 crc kubenswrapper[4791]: I0218 00:57:44.043445 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90ea96e9-ad1e-49ab-b9f9-b60d2142aacd-utilities\") pod \"redhat-operators-wvm6h\" (UID: \"90ea96e9-ad1e-49ab-b9f9-b60d2142aacd\") " pod="openshift-marketplace/redhat-operators-wvm6h" Feb 18 00:57:44 crc kubenswrapper[4791]: I0218 00:57:44.043512 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90ea96e9-ad1e-49ab-b9f9-b60d2142aacd-catalog-content\") pod \"redhat-operators-wvm6h\" (UID: \"90ea96e9-ad1e-49ab-b9f9-b60d2142aacd\") " pod="openshift-marketplace/redhat-operators-wvm6h" Feb 18 00:57:44 crc kubenswrapper[4791]: I0218 00:57:44.146045 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ql95m\" (UniqueName: \"kubernetes.io/projected/90ea96e9-ad1e-49ab-b9f9-b60d2142aacd-kube-api-access-ql95m\") pod \"redhat-operators-wvm6h\" (UID: \"90ea96e9-ad1e-49ab-b9f9-b60d2142aacd\") " pod="openshift-marketplace/redhat-operators-wvm6h" Feb 18 00:57:44 crc kubenswrapper[4791]: I0218 00:57:44.146099 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90ea96e9-ad1e-49ab-b9f9-b60d2142aacd-utilities\") pod \"redhat-operators-wvm6h\" (UID: \"90ea96e9-ad1e-49ab-b9f9-b60d2142aacd\") " 
pod="openshift-marketplace/redhat-operators-wvm6h" Feb 18 00:57:44 crc kubenswrapper[4791]: I0218 00:57:44.146169 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90ea96e9-ad1e-49ab-b9f9-b60d2142aacd-catalog-content\") pod \"redhat-operators-wvm6h\" (UID: \"90ea96e9-ad1e-49ab-b9f9-b60d2142aacd\") " pod="openshift-marketplace/redhat-operators-wvm6h" Feb 18 00:57:44 crc kubenswrapper[4791]: I0218 00:57:44.146625 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90ea96e9-ad1e-49ab-b9f9-b60d2142aacd-catalog-content\") pod \"redhat-operators-wvm6h\" (UID: \"90ea96e9-ad1e-49ab-b9f9-b60d2142aacd\") " pod="openshift-marketplace/redhat-operators-wvm6h" Feb 18 00:57:44 crc kubenswrapper[4791]: I0218 00:57:44.147186 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90ea96e9-ad1e-49ab-b9f9-b60d2142aacd-utilities\") pod \"redhat-operators-wvm6h\" (UID: \"90ea96e9-ad1e-49ab-b9f9-b60d2142aacd\") " pod="openshift-marketplace/redhat-operators-wvm6h" Feb 18 00:57:44 crc kubenswrapper[4791]: I0218 00:57:44.163563 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ql95m\" (UniqueName: \"kubernetes.io/projected/90ea96e9-ad1e-49ab-b9f9-b60d2142aacd-kube-api-access-ql95m\") pod \"redhat-operators-wvm6h\" (UID: \"90ea96e9-ad1e-49ab-b9f9-b60d2142aacd\") " pod="openshift-marketplace/redhat-operators-wvm6h" Feb 18 00:57:44 crc kubenswrapper[4791]: I0218 00:57:44.179809 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Feb 18 00:57:44 crc kubenswrapper[4791]: I0218 00:57:44.390180 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wvm6h" Feb 18 00:57:44 crc kubenswrapper[4791]: I0218 00:57:44.394962 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"5983f269-c1da-4b3d-90dd-083fa90022eb","Type":"ContainerStarted","Data":"5681a55e5eb5938d953b9fd0d7e7e86733a6a1e9d3f08d76106380b8c37606b5"} Feb 18 00:57:44 crc kubenswrapper[4791]: I0218 00:57:44.754216 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Feb 18 00:57:44 crc kubenswrapper[4791]: I0218 00:57:44.756863 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Feb 18 00:57:44 crc kubenswrapper[4791]: I0218 00:57:44.758507 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Feb 18 00:57:44 crc kubenswrapper[4791]: I0218 00:57:44.758786 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-tdksp" Feb 18 00:57:44 crc kubenswrapper[4791]: I0218 00:57:44.764957 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Feb 18 00:57:44 crc kubenswrapper[4791]: I0218 00:57:44.766783 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Feb 18 00:57:44 crc kubenswrapper[4791]: I0218 00:57:44.872373 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2dssn\" (UniqueName: \"kubernetes.io/projected/e10a79e5-3fc8-492a-9551-ee6cd80c2f83-kube-api-access-2dssn\") pod \"openstackclient\" (UID: \"e10a79e5-3fc8-492a-9551-ee6cd80c2f83\") " pod="openstack/openstackclient" Feb 18 00:57:44 crc kubenswrapper[4791]: I0218 00:57:44.872419 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e10a79e5-3fc8-492a-9551-ee6cd80c2f83-openstack-config-secret\") pod \"openstackclient\" (UID: \"e10a79e5-3fc8-492a-9551-ee6cd80c2f83\") " pod="openstack/openstackclient" Feb 18 00:57:44 crc kubenswrapper[4791]: I0218 00:57:44.872506 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e10a79e5-3fc8-492a-9551-ee6cd80c2f83-openstack-config\") pod \"openstackclient\" (UID: \"e10a79e5-3fc8-492a-9551-ee6cd80c2f83\") " pod="openstack/openstackclient" Feb 18 00:57:44 crc kubenswrapper[4791]: I0218 00:57:44.872568 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e10a79e5-3fc8-492a-9551-ee6cd80c2f83-combined-ca-bundle\") pod \"openstackclient\" (UID: \"e10a79e5-3fc8-492a-9551-ee6cd80c2f83\") " pod="openstack/openstackclient" Feb 18 00:57:44 crc kubenswrapper[4791]: I0218 00:57:44.978997 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2dssn\" (UniqueName: \"kubernetes.io/projected/e10a79e5-3fc8-492a-9551-ee6cd80c2f83-kube-api-access-2dssn\") pod \"openstackclient\" (UID: \"e10a79e5-3fc8-492a-9551-ee6cd80c2f83\") " pod="openstack/openstackclient" Feb 18 00:57:44 crc kubenswrapper[4791]: I0218 00:57:44.979065 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e10a79e5-3fc8-492a-9551-ee6cd80c2f83-openstack-config-secret\") pod \"openstackclient\" (UID: \"e10a79e5-3fc8-492a-9551-ee6cd80c2f83\") " pod="openstack/openstackclient" Feb 18 00:57:44 crc kubenswrapper[4791]: I0218 00:57:44.979180 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e10a79e5-3fc8-492a-9551-ee6cd80c2f83-openstack-config\") pod \"openstackclient\" (UID: \"e10a79e5-3fc8-492a-9551-ee6cd80c2f83\") " pod="openstack/openstackclient" Feb 18 00:57:44 crc kubenswrapper[4791]: I0218 00:57:44.979255 4791 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e10a79e5-3fc8-492a-9551-ee6cd80c2f83-combined-ca-bundle\") pod \"openstackclient\" (UID: \"e10a79e5-3fc8-492a-9551-ee6cd80c2f83\") " pod="openstack/openstackclient" Feb 18 00:57:44 crc kubenswrapper[4791]: I0218 00:57:44.983539 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e10a79e5-3fc8-492a-9551-ee6cd80c2f83-openstack-config\") pod \"openstackclient\" (UID: \"e10a79e5-3fc8-492a-9551-ee6cd80c2f83\") " pod="openstack/openstackclient" Feb 18 00:57:45 crc kubenswrapper[4791]: I0218 00:57:45.002041 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e10a79e5-3fc8-492a-9551-ee6cd80c2f83-openstack-config-secret\") pod \"openstackclient\" (UID: \"e10a79e5-3fc8-492a-9551-ee6cd80c2f83\") " pod="openstack/openstackclient" Feb 18 00:57:45 crc kubenswrapper[4791]: I0218 00:57:45.003269 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wvm6h"] Feb 18 00:57:45 crc kubenswrapper[4791]: I0218 00:57:45.004997 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2dssn\" (UniqueName: \"kubernetes.io/projected/e10a79e5-3fc8-492a-9551-ee6cd80c2f83-kube-api-access-2dssn\") pod \"openstackclient\" (UID: \"e10a79e5-3fc8-492a-9551-ee6cd80c2f83\") " pod="openstack/openstackclient" Feb 18 00:57:45 crc kubenswrapper[4791]: I0218 00:57:45.005797 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e10a79e5-3fc8-492a-9551-ee6cd80c2f83-combined-ca-bundle\") pod \"openstackclient\" (UID: \"e10a79e5-3fc8-492a-9551-ee6cd80c2f83\") " pod="openstack/openstackclient" Feb 18 00:57:45 crc kubenswrapper[4791]: I0218 00:57:45.094072 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Feb 18 00:57:45 crc kubenswrapper[4791]: I0218 00:57:45.435474 4791 generic.go:334] "Generic (PLEG): container finished" podID="90ea96e9-ad1e-49ab-b9f9-b60d2142aacd" containerID="91059b958d53a70079f641fbb5feaf69ac224ef29059998f61aa3495170b687f" exitCode=0 Feb 18 00:57:45 crc kubenswrapper[4791]: I0218 00:57:45.435908 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wvm6h" event={"ID":"90ea96e9-ad1e-49ab-b9f9-b60d2142aacd","Type":"ContainerDied","Data":"91059b958d53a70079f641fbb5feaf69ac224ef29059998f61aa3495170b687f"} Feb 18 00:57:45 crc kubenswrapper[4791]: I0218 00:57:45.436442 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wvm6h" event={"ID":"90ea96e9-ad1e-49ab-b9f9-b60d2142aacd","Type":"ContainerStarted","Data":"5fa8e1eb4cd5eb6b473bec7b29777c9dfd6032a45f03551fe3c346dbb1164a12"} Feb 18 00:57:45 crc kubenswrapper[4791]: I0218 00:57:45.464251 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"5983f269-c1da-4b3d-90dd-083fa90022eb","Type":"ContainerStarted","Data":"d609d6bda2f80ccb002e59794af0d460570c35028c1c0c264045222bd5f0d6a9"} Feb 18 00:57:45 crc kubenswrapper[4791]: I0218 00:57:45.515430 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.515405311 podStartE2EDuration="3.515405311s" podCreationTimestamp="2026-02-18 00:57:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:57:45.48953655 +0000 UTC m=+1407.057549720" watchObservedRunningTime="2026-02-18 00:57:45.515405311 +0000 UTC m=+1407.083418471" Feb 18 00:57:45 crc kubenswrapper[4791]: I0218 00:57:45.593179 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Feb 18 00:57:45 crc kubenswrapper[4791]: W0218 00:57:45.599629 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode10a79e5_3fc8_492a_9551_ee6cd80c2f83.slice/crio-d7082d1dc48ba33f31d8afc1fa3a65d0f8e6b95f2bf86323878e6e2d5ac094d2 WatchSource:0}: Error finding container d7082d1dc48ba33f31d8afc1fa3a65d0f8e6b95f2bf86323878e6e2d5ac094d2: Status 404 returned error can't find the container with id d7082d1dc48ba33f31d8afc1fa3a65d0f8e6b95f2bf86323878e6e2d5ac094d2 Feb 18 00:57:46 crc kubenswrapper[4791]: I0218 00:57:46.480745 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"e10a79e5-3fc8-492a-9551-ee6cd80c2f83","Type":"ContainerStarted","Data":"d7082d1dc48ba33f31d8afc1fa3a65d0f8e6b95f2bf86323878e6e2d5ac094d2"} Feb 18 00:57:47 crc kubenswrapper[4791]: I0218 00:57:47.494576 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wvm6h" event={"ID":"90ea96e9-ad1e-49ab-b9f9-b60d2142aacd","Type":"ContainerStarted","Data":"9b36e7871e18635dbf919fd0df751c56aad9eb498076dfeaf197784201ea673b"} Feb 18 00:57:47 crc kubenswrapper[4791]: I0218 00:57:47.777877 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.605387 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-5686f7fc6f-xd9xk"] Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.607188 4791 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-5686f7fc6f-xd9xk" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.611921 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.612117 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-x765r" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.618802 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-engine-config-data" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.633655 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-5686f7fc6f-xd9xk"] Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.728644 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vq7dn\" (UniqueName: \"kubernetes.io/projected/f729e438-68f7-48b7-9a93-e54e1da4c045-kube-api-access-vq7dn\") pod \"heat-engine-5686f7fc6f-xd9xk\" (UID: \"f729e438-68f7-48b7-9a93-e54e1da4c045\") " pod="openstack/heat-engine-5686f7fc6f-xd9xk" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.728729 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f729e438-68f7-48b7-9a93-e54e1da4c045-config-data-custom\") pod \"heat-engine-5686f7fc6f-xd9xk\" (UID: \"f729e438-68f7-48b7-9a93-e54e1da4c045\") " pod="openstack/heat-engine-5686f7fc6f-xd9xk" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.728770 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f729e438-68f7-48b7-9a93-e54e1da4c045-config-data\") pod \"heat-engine-5686f7fc6f-xd9xk\" (UID: \"f729e438-68f7-48b7-9a93-e54e1da4c045\") " pod="openstack/heat-engine-5686f7fc6f-xd9xk" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.728789 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f729e438-68f7-48b7-9a93-e54e1da4c045-combined-ca-bundle\") pod \"heat-engine-5686f7fc6f-xd9xk\" (UID: \"f729e438-68f7-48b7-9a93-e54e1da4c045\") " pod="openstack/heat-engine-5686f7fc6f-xd9xk" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.750832 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-9skqg"] Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.753270 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f6bc4c6c9-9skqg" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.780870 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-9skqg"] Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.796599 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-f5d4ccdc7-7kwld"] Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.798702 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-f5d4ccdc7-7kwld" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.809050 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-cfnapi-config-data" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.831941 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/26012547-d6fc-44de-9ad8-413d40acfb88-config\") pod \"dnsmasq-dns-f6bc4c6c9-9skqg\" (UID: \"26012547-d6fc-44de-9ad8-413d40acfb88\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-9skqg" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.832113 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/26012547-d6fc-44de-9ad8-413d40acfb88-dns-svc\") pod \"dnsmasq-dns-f6bc4c6c9-9skqg\" (UID: \"26012547-d6fc-44de-9ad8-413d40acfb88\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-9skqg" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.832150 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vq7dn\" (UniqueName: \"kubernetes.io/projected/f729e438-68f7-48b7-9a93-e54e1da4c045-kube-api-access-vq7dn\") pod \"heat-engine-5686f7fc6f-xd9xk\" (UID: \"f729e438-68f7-48b7-9a93-e54e1da4c045\") " pod="openstack/heat-engine-5686f7fc6f-xd9xk" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.853994 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/26012547-d6fc-44de-9ad8-413d40acfb88-ovsdbserver-nb\") pod \"dnsmasq-dns-f6bc4c6c9-9skqg\" (UID: \"26012547-d6fc-44de-9ad8-413d40acfb88\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-9skqg" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.854204 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/26012547-d6fc-44de-9ad8-413d40acfb88-dns-swift-storage-0\") pod \"dnsmasq-dns-f6bc4c6c9-9skqg\" (UID: \"26012547-d6fc-44de-9ad8-413d40acfb88\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-9skqg" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.854322 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/26012547-d6fc-44de-9ad8-413d40acfb88-ovsdbserver-sb\") pod \"dnsmasq-dns-f6bc4c6c9-9skqg\" (UID: \"26012547-d6fc-44de-9ad8-413d40acfb88\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-9skqg" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.854435 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f729e438-68f7-48b7-9a93-e54e1da4c045-config-data-custom\") pod \"heat-engine-5686f7fc6f-xd9xk\" (UID: \"f729e438-68f7-48b7-9a93-e54e1da4c045\") " pod="openstack/heat-engine-5686f7fc6f-xd9xk" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.854585 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f729e438-68f7-48b7-9a93-e54e1da4c045-config-data\") pod \"heat-engine-5686f7fc6f-xd9xk\" (UID: \"f729e438-68f7-48b7-9a93-e54e1da4c045\") " pod="openstack/heat-engine-5686f7fc6f-xd9xk" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.854662 4791 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f729e438-68f7-48b7-9a93-e54e1da4c045-combined-ca-bundle\") pod \"heat-engine-5686f7fc6f-xd9xk\" (UID: \"f729e438-68f7-48b7-9a93-e54e1da4c045\") " pod="openstack/heat-engine-5686f7fc6f-xd9xk" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.854769 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mt899\" (UniqueName: \"kubernetes.io/projected/26012547-d6fc-44de-9ad8-413d40acfb88-kube-api-access-mt899\") pod \"dnsmasq-dns-f6bc4c6c9-9skqg\" (UID: \"26012547-d6fc-44de-9ad8-413d40acfb88\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-9skqg" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.863286 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f729e438-68f7-48b7-9a93-e54e1da4c045-config-data-custom\") pod \"heat-engine-5686f7fc6f-xd9xk\" (UID: \"f729e438-68f7-48b7-9a93-e54e1da4c045\") " pod="openstack/heat-engine-5686f7fc6f-xd9xk" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.864206 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f729e438-68f7-48b7-9a93-e54e1da4c045-config-data\") pod \"heat-engine-5686f7fc6f-xd9xk\" (UID: \"f729e438-68f7-48b7-9a93-e54e1da4c045\") " pod="openstack/heat-engine-5686f7fc6f-xd9xk" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.874532 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vq7dn\" (UniqueName: \"kubernetes.io/projected/f729e438-68f7-48b7-9a93-e54e1da4c045-kube-api-access-vq7dn\") pod \"heat-engine-5686f7fc6f-xd9xk\" (UID: \"f729e438-68f7-48b7-9a93-e54e1da4c045\") " pod="openstack/heat-engine-5686f7fc6f-xd9xk" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.884506 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f729e438-68f7-48b7-9a93-e54e1da4c045-combined-ca-bundle\") pod \"heat-engine-5686f7fc6f-xd9xk\" (UID: \"f729e438-68f7-48b7-9a93-e54e1da4c045\") " pod="openstack/heat-engine-5686f7fc6f-xd9xk" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.919230 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-f5d4ccdc7-7kwld"] Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.929741 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-5686f7fc6f-xd9xk" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.958064 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/26012547-d6fc-44de-9ad8-413d40acfb88-ovsdbserver-nb\") pod \"dnsmasq-dns-f6bc4c6c9-9skqg\" (UID: \"26012547-d6fc-44de-9ad8-413d40acfb88\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-9skqg" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.958123 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/26012547-d6fc-44de-9ad8-413d40acfb88-dns-swift-storage-0\") pod \"dnsmasq-dns-f6bc4c6c9-9skqg\" (UID: \"26012547-d6fc-44de-9ad8-413d40acfb88\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-9skqg" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.958140 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/26012547-d6fc-44de-9ad8-413d40acfb88-ovsdbserver-sb\") pod \"dnsmasq-dns-f6bc4c6c9-9skqg\" (UID: \"26012547-d6fc-44de-9ad8-413d40acfb88\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-9skqg" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.958209 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mt899\" (UniqueName: \"kubernetes.io/projected/26012547-d6fc-44de-9ad8-413d40acfb88-kube-api-access-mt899\") pod \"dnsmasq-dns-f6bc4c6c9-9skqg\" (UID: \"26012547-d6fc-44de-9ad8-413d40acfb88\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-9skqg" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.958239 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e21c26db-c068-4ee6-be23-2539a847b9d8-config-data\") pod \"heat-cfnapi-f5d4ccdc7-7kwld\" (UID: \"e21c26db-c068-4ee6-be23-2539a847b9d8\") " pod="openstack/heat-cfnapi-f5d4ccdc7-7kwld" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.958275 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/26012547-d6fc-44de-9ad8-413d40acfb88-config\") pod \"dnsmasq-dns-f6bc4c6c9-9skqg\" (UID: \"26012547-d6fc-44de-9ad8-413d40acfb88\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-9skqg" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.958292 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nrs2f\" (UniqueName: \"kubernetes.io/projected/e21c26db-c068-4ee6-be23-2539a847b9d8-kube-api-access-nrs2f\") pod \"heat-cfnapi-f5d4ccdc7-7kwld\" (UID: \"e21c26db-c068-4ee6-be23-2539a847b9d8\") " pod="openstack/heat-cfnapi-f5d4ccdc7-7kwld" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.958342 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e21c26db-c068-4ee6-be23-2539a847b9d8-config-data-custom\") pod \"heat-cfnapi-f5d4ccdc7-7kwld\" (UID: \"e21c26db-c068-4ee6-be23-2539a847b9d8\") " pod="openstack/heat-cfnapi-f5d4ccdc7-7kwld" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.958381 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e21c26db-c068-4ee6-be23-2539a847b9d8-combined-ca-bundle\") pod 
\"heat-cfnapi-f5d4ccdc7-7kwld\" (UID: \"e21c26db-c068-4ee6-be23-2539a847b9d8\") " pod="openstack/heat-cfnapi-f5d4ccdc7-7kwld" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.958435 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/26012547-d6fc-44de-9ad8-413d40acfb88-dns-svc\") pod \"dnsmasq-dns-f6bc4c6c9-9skqg\" (UID: \"26012547-d6fc-44de-9ad8-413d40acfb88\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-9skqg" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.959350 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/26012547-d6fc-44de-9ad8-413d40acfb88-dns-svc\") pod \"dnsmasq-dns-f6bc4c6c9-9skqg\" (UID: \"26012547-d6fc-44de-9ad8-413d40acfb88\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-9skqg" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.959850 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/26012547-d6fc-44de-9ad8-413d40acfb88-ovsdbserver-nb\") pod \"dnsmasq-dns-f6bc4c6c9-9skqg\" (UID: \"26012547-d6fc-44de-9ad8-413d40acfb88\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-9skqg" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.960881 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/26012547-d6fc-44de-9ad8-413d40acfb88-dns-swift-storage-0\") pod \"dnsmasq-dns-f6bc4c6c9-9skqg\" (UID: \"26012547-d6fc-44de-9ad8-413d40acfb88\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-9skqg" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.965431 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/26012547-d6fc-44de-9ad8-413d40acfb88-config\") pod \"dnsmasq-dns-f6bc4c6c9-9skqg\" (UID: \"26012547-d6fc-44de-9ad8-413d40acfb88\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-9skqg" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.974587 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/26012547-d6fc-44de-9ad8-413d40acfb88-ovsdbserver-sb\") pod \"dnsmasq-dns-f6bc4c6c9-9skqg\" (UID: \"26012547-d6fc-44de-9ad8-413d40acfb88\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-9skqg" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.986206 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-74768d85bc-v29rm"] Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.987777 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-74768d85bc-v29rm" Feb 18 00:57:50 crc kubenswrapper[4791]: I0218 00:57:50.991870 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-api-config-data" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.001125 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mt899\" (UniqueName: \"kubernetes.io/projected/26012547-d6fc-44de-9ad8-413d40acfb88-kube-api-access-mt899\") pod \"dnsmasq-dns-f6bc4c6c9-9skqg\" (UID: \"26012547-d6fc-44de-9ad8-413d40acfb88\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-9skqg" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.009085 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-74768d85bc-v29rm"] Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.075606 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e21c26db-c068-4ee6-be23-2539a847b9d8-config-data\") pod \"heat-cfnapi-f5d4ccdc7-7kwld\" (UID: \"e21c26db-c068-4ee6-be23-2539a847b9d8\") " pod="openstack/heat-cfnapi-f5d4ccdc7-7kwld" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.079690 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e21c26db-c068-4ee6-be23-2539a847b9d8-config-data\") pod \"heat-cfnapi-f5d4ccdc7-7kwld\" (UID: \"e21c26db-c068-4ee6-be23-2539a847b9d8\") " pod="openstack/heat-cfnapi-f5d4ccdc7-7kwld" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.079862 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3546ea0e-00fb-4c3a-a653-54fc9527457b-combined-ca-bundle\") pod \"heat-api-74768d85bc-v29rm\" (UID: \"3546ea0e-00fb-4c3a-a653-54fc9527457b\") " pod="openstack/heat-api-74768d85bc-v29rm" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.087274 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nrs2f\" (UniqueName: \"kubernetes.io/projected/e21c26db-c068-4ee6-be23-2539a847b9d8-kube-api-access-nrs2f\") pod \"heat-cfnapi-f5d4ccdc7-7kwld\" (UID: \"e21c26db-c068-4ee6-be23-2539a847b9d8\") " pod="openstack/heat-cfnapi-f5d4ccdc7-7kwld" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.087427 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e21c26db-c068-4ee6-be23-2539a847b9d8-config-data-custom\") pod \"heat-cfnapi-f5d4ccdc7-7kwld\" (UID: \"e21c26db-c068-4ee6-be23-2539a847b9d8\") " pod="openstack/heat-cfnapi-f5d4ccdc7-7kwld" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.087533 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e21c26db-c068-4ee6-be23-2539a847b9d8-combined-ca-bundle\") pod \"heat-cfnapi-f5d4ccdc7-7kwld\" (UID: \"e21c26db-c068-4ee6-be23-2539a847b9d8\") " pod="openstack/heat-cfnapi-f5d4ccdc7-7kwld" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.087652 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3546ea0e-00fb-4c3a-a653-54fc9527457b-config-data-custom\") pod \"heat-api-74768d85bc-v29rm\" (UID: \"3546ea0e-00fb-4c3a-a653-54fc9527457b\") " pod="openstack/heat-api-74768d85bc-v29rm" Feb 18 00:57:51 crc 
kubenswrapper[4791]: I0218 00:57:51.087735 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rnl6l\" (UniqueName: \"kubernetes.io/projected/3546ea0e-00fb-4c3a-a653-54fc9527457b-kube-api-access-rnl6l\") pod \"heat-api-74768d85bc-v29rm\" (UID: \"3546ea0e-00fb-4c3a-a653-54fc9527457b\") " pod="openstack/heat-api-74768d85bc-v29rm" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.087827 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3546ea0e-00fb-4c3a-a653-54fc9527457b-config-data\") pod \"heat-api-74768d85bc-v29rm\" (UID: \"3546ea0e-00fb-4c3a-a653-54fc9527457b\") " pod="openstack/heat-api-74768d85bc-v29rm" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.088915 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f6bc4c6c9-9skqg" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.107068 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e21c26db-c068-4ee6-be23-2539a847b9d8-config-data-custom\") pod \"heat-cfnapi-f5d4ccdc7-7kwld\" (UID: \"e21c26db-c068-4ee6-be23-2539a847b9d8\") " pod="openstack/heat-cfnapi-f5d4ccdc7-7kwld" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.108928 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e21c26db-c068-4ee6-be23-2539a847b9d8-combined-ca-bundle\") pod \"heat-cfnapi-f5d4ccdc7-7kwld\" (UID: \"e21c26db-c068-4ee6-be23-2539a847b9d8\") " pod="openstack/heat-cfnapi-f5d4ccdc7-7kwld" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.128181 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-5c6c54c7f5-xvkc2"] Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.132978 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nrs2f\" (UniqueName: \"kubernetes.io/projected/e21c26db-c068-4ee6-be23-2539a847b9d8-kube-api-access-nrs2f\") pod \"heat-cfnapi-f5d4ccdc7-7kwld\" (UID: \"e21c26db-c068-4ee6-be23-2539a847b9d8\") " pod="openstack/heat-cfnapi-f5d4ccdc7-7kwld" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.135693 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-5c6c54c7f5-xvkc2" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.142522 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.142723 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.144927 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.154268 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-f5d4ccdc7-7kwld" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.157275 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-5c6c54c7f5-xvkc2"] Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.194096 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3546ea0e-00fb-4c3a-a653-54fc9527457b-config-data-custom\") pod \"heat-api-74768d85bc-v29rm\" (UID: \"3546ea0e-00fb-4c3a-a653-54fc9527457b\") " pod="openstack/heat-api-74768d85bc-v29rm" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.194187 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rnl6l\" (UniqueName: \"kubernetes.io/projected/3546ea0e-00fb-4c3a-a653-54fc9527457b-kube-api-access-rnl6l\") pod \"heat-api-74768d85bc-v29rm\" (UID: \"3546ea0e-00fb-4c3a-a653-54fc9527457b\") " pod="openstack/heat-api-74768d85bc-v29rm" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.194230 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3546ea0e-00fb-4c3a-a653-54fc9527457b-config-data\") pod \"heat-api-74768d85bc-v29rm\" (UID: \"3546ea0e-00fb-4c3a-a653-54fc9527457b\") " pod="openstack/heat-api-74768d85bc-v29rm" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.195491 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3546ea0e-00fb-4c3a-a653-54fc9527457b-combined-ca-bundle\") pod \"heat-api-74768d85bc-v29rm\" (UID: \"3546ea0e-00fb-4c3a-a653-54fc9527457b\") " pod="openstack/heat-api-74768d85bc-v29rm" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.200904 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3546ea0e-00fb-4c3a-a653-54fc9527457b-config-data-custom\") pod \"heat-api-74768d85bc-v29rm\" (UID: \"3546ea0e-00fb-4c3a-a653-54fc9527457b\") " pod="openstack/heat-api-74768d85bc-v29rm" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.200961 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3546ea0e-00fb-4c3a-a653-54fc9527457b-config-data\") pod \"heat-api-74768d85bc-v29rm\" (UID: \"3546ea0e-00fb-4c3a-a653-54fc9527457b\") " pod="openstack/heat-api-74768d85bc-v29rm" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.202427 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3546ea0e-00fb-4c3a-a653-54fc9527457b-combined-ca-bundle\") pod \"heat-api-74768d85bc-v29rm\" (UID: \"3546ea0e-00fb-4c3a-a653-54fc9527457b\") " pod="openstack/heat-api-74768d85bc-v29rm" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.247837 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rnl6l\" (UniqueName: \"kubernetes.io/projected/3546ea0e-00fb-4c3a-a653-54fc9527457b-kube-api-access-rnl6l\") pod \"heat-api-74768d85bc-v29rm\" (UID: \"3546ea0e-00fb-4c3a-a653-54fc9527457b\") " pod="openstack/heat-api-74768d85bc-v29rm" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.298434 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/f0488233-4091-4798-b96e-194d46245d44-public-tls-certs\") pod \"swift-proxy-5c6c54c7f5-xvkc2\" (UID: \"f0488233-4091-4798-b96e-194d46245d44\") " pod="openstack/swift-proxy-5c6c54c7f5-xvkc2" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.298506 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/f0488233-4091-4798-b96e-194d46245d44-etc-swift\") pod \"swift-proxy-5c6c54c7f5-xvkc2\" (UID: \"f0488233-4091-4798-b96e-194d46245d44\") " pod="openstack/swift-proxy-5c6c54c7f5-xvkc2" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.298563 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0488233-4091-4798-b96e-194d46245d44-combined-ca-bundle\") pod \"swift-proxy-5c6c54c7f5-xvkc2\" (UID: \"f0488233-4091-4798-b96e-194d46245d44\") " pod="openstack/swift-proxy-5c6c54c7f5-xvkc2" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.298606 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f0488233-4091-4798-b96e-194d46245d44-run-httpd\") pod \"swift-proxy-5c6c54c7f5-xvkc2\" (UID: \"f0488233-4091-4798-b96e-194d46245d44\") " pod="openstack/swift-proxy-5c6c54c7f5-xvkc2" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.298638 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p8n4w\" (UniqueName: \"kubernetes.io/projected/f0488233-4091-4798-b96e-194d46245d44-kube-api-access-p8n4w\") pod \"swift-proxy-5c6c54c7f5-xvkc2\" (UID: \"f0488233-4091-4798-b96e-194d46245d44\") " pod="openstack/swift-proxy-5c6c54c7f5-xvkc2" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.298688 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f0488233-4091-4798-b96e-194d46245d44-log-httpd\") pod \"swift-proxy-5c6c54c7f5-xvkc2\" (UID: \"f0488233-4091-4798-b96e-194d46245d44\") " pod="openstack/swift-proxy-5c6c54c7f5-xvkc2" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.298712 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0488233-4091-4798-b96e-194d46245d44-config-data\") pod \"swift-proxy-5c6c54c7f5-xvkc2\" (UID: \"f0488233-4091-4798-b96e-194d46245d44\") " pod="openstack/swift-proxy-5c6c54c7f5-xvkc2" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.298757 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0488233-4091-4798-b96e-194d46245d44-internal-tls-certs\") pod \"swift-proxy-5c6c54c7f5-xvkc2\" (UID: \"f0488233-4091-4798-b96e-194d46245d44\") " pod="openstack/swift-proxy-5c6c54c7f5-xvkc2" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.373388 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-74768d85bc-v29rm" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.400231 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0488233-4091-4798-b96e-194d46245d44-combined-ca-bundle\") pod \"swift-proxy-5c6c54c7f5-xvkc2\" (UID: \"f0488233-4091-4798-b96e-194d46245d44\") " pod="openstack/swift-proxy-5c6c54c7f5-xvkc2" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.400299 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f0488233-4091-4798-b96e-194d46245d44-run-httpd\") pod \"swift-proxy-5c6c54c7f5-xvkc2\" (UID: \"f0488233-4091-4798-b96e-194d46245d44\") " pod="openstack/swift-proxy-5c6c54c7f5-xvkc2" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.400336 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p8n4w\" (UniqueName: \"kubernetes.io/projected/f0488233-4091-4798-b96e-194d46245d44-kube-api-access-p8n4w\") pod \"swift-proxy-5c6c54c7f5-xvkc2\" (UID: \"f0488233-4091-4798-b96e-194d46245d44\") " pod="openstack/swift-proxy-5c6c54c7f5-xvkc2" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.400397 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f0488233-4091-4798-b96e-194d46245d44-log-httpd\") pod \"swift-proxy-5c6c54c7f5-xvkc2\" (UID: \"f0488233-4091-4798-b96e-194d46245d44\") " pod="openstack/swift-proxy-5c6c54c7f5-xvkc2" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.400426 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0488233-4091-4798-b96e-194d46245d44-config-data\") pod \"swift-proxy-5c6c54c7f5-xvkc2\" (UID: \"f0488233-4091-4798-b96e-194d46245d44\") " pod="openstack/swift-proxy-5c6c54c7f5-xvkc2" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.400476 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0488233-4091-4798-b96e-194d46245d44-internal-tls-certs\") pod \"swift-proxy-5c6c54c7f5-xvkc2\" (UID: \"f0488233-4091-4798-b96e-194d46245d44\") " pod="openstack/swift-proxy-5c6c54c7f5-xvkc2" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.400525 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0488233-4091-4798-b96e-194d46245d44-public-tls-certs\") pod \"swift-proxy-5c6c54c7f5-xvkc2\" (UID: \"f0488233-4091-4798-b96e-194d46245d44\") " pod="openstack/swift-proxy-5c6c54c7f5-xvkc2" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.400557 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/f0488233-4091-4798-b96e-194d46245d44-etc-swift\") pod \"swift-proxy-5c6c54c7f5-xvkc2\" (UID: \"f0488233-4091-4798-b96e-194d46245d44\") " pod="openstack/swift-proxy-5c6c54c7f5-xvkc2" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.401547 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f0488233-4091-4798-b96e-194d46245d44-log-httpd\") pod \"swift-proxy-5c6c54c7f5-xvkc2\" (UID: \"f0488233-4091-4798-b96e-194d46245d44\") " pod="openstack/swift-proxy-5c6c54c7f5-xvkc2" Feb 18 00:57:51 
crc kubenswrapper[4791]: I0218 00:57:51.402689 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f0488233-4091-4798-b96e-194d46245d44-run-httpd\") pod \"swift-proxy-5c6c54c7f5-xvkc2\" (UID: \"f0488233-4091-4798-b96e-194d46245d44\") " pod="openstack/swift-proxy-5c6c54c7f5-xvkc2" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.404902 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0488233-4091-4798-b96e-194d46245d44-combined-ca-bundle\") pod \"swift-proxy-5c6c54c7f5-xvkc2\" (UID: \"f0488233-4091-4798-b96e-194d46245d44\") " pod="openstack/swift-proxy-5c6c54c7f5-xvkc2" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.408025 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0488233-4091-4798-b96e-194d46245d44-public-tls-certs\") pod \"swift-proxy-5c6c54c7f5-xvkc2\" (UID: \"f0488233-4091-4798-b96e-194d46245d44\") " pod="openstack/swift-proxy-5c6c54c7f5-xvkc2" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.408332 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/f0488233-4091-4798-b96e-194d46245d44-etc-swift\") pod \"swift-proxy-5c6c54c7f5-xvkc2\" (UID: \"f0488233-4091-4798-b96e-194d46245d44\") " pod="openstack/swift-proxy-5c6c54c7f5-xvkc2" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.408942 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0488233-4091-4798-b96e-194d46245d44-internal-tls-certs\") pod \"swift-proxy-5c6c54c7f5-xvkc2\" (UID: \"f0488233-4091-4798-b96e-194d46245d44\") " pod="openstack/swift-proxy-5c6c54c7f5-xvkc2" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.409463 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0488233-4091-4798-b96e-194d46245d44-config-data\") pod \"swift-proxy-5c6c54c7f5-xvkc2\" (UID: \"f0488233-4091-4798-b96e-194d46245d44\") " pod="openstack/swift-proxy-5c6c54c7f5-xvkc2" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.418771 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p8n4w\" (UniqueName: \"kubernetes.io/projected/f0488233-4091-4798-b96e-194d46245d44-kube-api-access-p8n4w\") pod \"swift-proxy-5c6c54c7f5-xvkc2\" (UID: \"f0488233-4091-4798-b96e-194d46245d44\") " pod="openstack/swift-proxy-5c6c54c7f5-xvkc2" Feb 18 00:57:51 crc kubenswrapper[4791]: I0218 00:57:51.513932 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-5c6c54c7f5-xvkc2" Feb 18 00:57:52 crc kubenswrapper[4791]: I0218 00:57:52.567944 4791 generic.go:334] "Generic (PLEG): container finished" podID="90ea96e9-ad1e-49ab-b9f9-b60d2142aacd" containerID="9b36e7871e18635dbf919fd0df751c56aad9eb498076dfeaf197784201ea673b" exitCode=0 Feb 18 00:57:52 crc kubenswrapper[4791]: I0218 00:57:52.568040 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wvm6h" event={"ID":"90ea96e9-ad1e-49ab-b9f9-b60d2142aacd","Type":"ContainerDied","Data":"9b36e7871e18635dbf919fd0df751c56aad9eb498076dfeaf197784201ea673b"} Feb 18 00:57:53 crc kubenswrapper[4791]: I0218 00:57:53.011544 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Feb 18 00:57:54 crc kubenswrapper[4791]: I0218 00:57:54.403480 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:57:54 crc kubenswrapper[4791]: I0218 00:57:54.405245 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6e095560-3137-46e5-bd5e-f03eee61fdb6" containerName="ceilometer-central-agent" containerID="cri-o://4465be71ab647b33970d4a204efcf7a436c78286539bc074a504e1389aa3d92f" gracePeriod=30 Feb 18 00:57:54 crc kubenswrapper[4791]: I0218 00:57:54.405326 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6e095560-3137-46e5-bd5e-f03eee61fdb6" containerName="sg-core" containerID="cri-o://216248e56f84af6b908e0abad59dce5fb324f189ff519cc6095e06d70d532305" gracePeriod=30 Feb 18 00:57:54 crc kubenswrapper[4791]: I0218 00:57:54.405372 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6e095560-3137-46e5-bd5e-f03eee61fdb6" containerName="ceilometer-notification-agent" containerID="cri-o://1d083905565b7442c7ab2e96d29ccc4ab05dc8aa982cac0277293ad347ae5d6d" gracePeriod=30 Feb 18 00:57:54 crc kubenswrapper[4791]: I0218 00:57:54.405411 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6e095560-3137-46e5-bd5e-f03eee61fdb6" containerName="proxy-httpd" containerID="cri-o://676b2f0e14dc541f44cb23a5ea59f2519f4bbfe7d8ec509d3834ad5d0b5e3e82" gracePeriod=30 Feb 18 00:57:54 crc kubenswrapper[4791]: I0218 00:57:54.437222 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="6e095560-3137-46e5-bd5e-f03eee61fdb6" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 18 00:57:54 crc kubenswrapper[4791]: I0218 00:57:54.593576 4791 generic.go:334] "Generic (PLEG): container finished" podID="6e095560-3137-46e5-bd5e-f03eee61fdb6" containerID="216248e56f84af6b908e0abad59dce5fb324f189ff519cc6095e06d70d532305" exitCode=2 Feb 18 00:57:54 crc kubenswrapper[4791]: I0218 00:57:54.593628 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6e095560-3137-46e5-bd5e-f03eee61fdb6","Type":"ContainerDied","Data":"216248e56f84af6b908e0abad59dce5fb324f189ff519cc6095e06d70d532305"} Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.383755 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-nnnff"] Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.385493 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-nnnff" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.399831 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-nnnff"] Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.481663 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-snbt9"] Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.483203 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-snbt9" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.494024 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a90c6dfb-27a9-46ad-9cb8-121d9fb4bee8-operator-scripts\") pod \"nova-api-db-create-nnnff\" (UID: \"a90c6dfb-27a9-46ad-9cb8-121d9fb4bee8\") " pod="openstack/nova-api-db-create-nnnff" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.494108 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h2drt\" (UniqueName: \"kubernetes.io/projected/a90c6dfb-27a9-46ad-9cb8-121d9fb4bee8-kube-api-access-h2drt\") pod \"nova-api-db-create-nnnff\" (UID: \"a90c6dfb-27a9-46ad-9cb8-121d9fb4bee8\") " pod="openstack/nova-api-db-create-nnnff" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.499676 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-snbt9"] Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.514047 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-2f60-account-create-update-kwdnz"] Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.515606 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-2f60-account-create-update-kwdnz" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.517948 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.526781 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-2f60-account-create-update-kwdnz"] Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.581006 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-fw4wg"] Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.582432 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-fw4wg" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.589753 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-fw4wg"] Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.600547 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a90c6dfb-27a9-46ad-9cb8-121d9fb4bee8-operator-scripts\") pod \"nova-api-db-create-nnnff\" (UID: \"a90c6dfb-27a9-46ad-9cb8-121d9fb4bee8\") " pod="openstack/nova-api-db-create-nnnff" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.600611 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/82100e2b-cadd-4a88-9599-2f0932deacce-operator-scripts\") pod \"nova-cell1-db-create-fw4wg\" (UID: \"82100e2b-cadd-4a88-9599-2f0932deacce\") " pod="openstack/nova-cell1-db-create-fw4wg" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.600648 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h2drt\" (UniqueName: \"kubernetes.io/projected/a90c6dfb-27a9-46ad-9cb8-121d9fb4bee8-kube-api-access-h2drt\") pod \"nova-api-db-create-nnnff\" (UID: \"a90c6dfb-27a9-46ad-9cb8-121d9fb4bee8\") " pod="openstack/nova-api-db-create-nnnff" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.600716 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2lnmg\" (UniqueName: \"kubernetes.io/projected/2e5c3f09-b080-4738-a696-f210249c18eb-kube-api-access-2lnmg\") pod \"nova-cell0-db-create-snbt9\" (UID: \"2e5c3f09-b080-4738-a696-f210249c18eb\") " pod="openstack/nova-cell0-db-create-snbt9" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.600755 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e5c3f09-b080-4738-a696-f210249c18eb-operator-scripts\") pod \"nova-cell0-db-create-snbt9\" (UID: \"2e5c3f09-b080-4738-a696-f210249c18eb\") " pod="openstack/nova-cell0-db-create-snbt9" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.600771 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6944b18f-146d-47ba-9d71-06d200fa828e-operator-scripts\") pod \"nova-api-2f60-account-create-update-kwdnz\" (UID: \"6944b18f-146d-47ba-9d71-06d200fa828e\") " pod="openstack/nova-api-2f60-account-create-update-kwdnz" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.600826 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2zlw7\" (UniqueName: \"kubernetes.io/projected/6944b18f-146d-47ba-9d71-06d200fa828e-kube-api-access-2zlw7\") pod \"nova-api-2f60-account-create-update-kwdnz\" (UID: \"6944b18f-146d-47ba-9d71-06d200fa828e\") " pod="openstack/nova-api-2f60-account-create-update-kwdnz" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.600851 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pv9w8\" (UniqueName: \"kubernetes.io/projected/82100e2b-cadd-4a88-9599-2f0932deacce-kube-api-access-pv9w8\") pod \"nova-cell1-db-create-fw4wg\" (UID: \"82100e2b-cadd-4a88-9599-2f0932deacce\") " pod="openstack/nova-cell1-db-create-fw4wg" Feb 18 
00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.601692 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a90c6dfb-27a9-46ad-9cb8-121d9fb4bee8-operator-scripts\") pod \"nova-api-db-create-nnnff\" (UID: \"a90c6dfb-27a9-46ad-9cb8-121d9fb4bee8\") " pod="openstack/nova-api-db-create-nnnff" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.622277 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h2drt\" (UniqueName: \"kubernetes.io/projected/a90c6dfb-27a9-46ad-9cb8-121d9fb4bee8-kube-api-access-h2drt\") pod \"nova-api-db-create-nnnff\" (UID: \"a90c6dfb-27a9-46ad-9cb8-121d9fb4bee8\") " pod="openstack/nova-api-db-create-nnnff" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.622488 4791 generic.go:334] "Generic (PLEG): container finished" podID="6e095560-3137-46e5-bd5e-f03eee61fdb6" containerID="676b2f0e14dc541f44cb23a5ea59f2519f4bbfe7d8ec509d3834ad5d0b5e3e82" exitCode=0 Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.622518 4791 generic.go:334] "Generic (PLEG): container finished" podID="6e095560-3137-46e5-bd5e-f03eee61fdb6" containerID="4465be71ab647b33970d4a204efcf7a436c78286539bc074a504e1389aa3d92f" exitCode=0 Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.622538 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6e095560-3137-46e5-bd5e-f03eee61fdb6","Type":"ContainerDied","Data":"676b2f0e14dc541f44cb23a5ea59f2519f4bbfe7d8ec509d3834ad5d0b5e3e82"} Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.622560 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6e095560-3137-46e5-bd5e-f03eee61fdb6","Type":"ContainerDied","Data":"4465be71ab647b33970d4a204efcf7a436c78286539bc074a504e1389aa3d92f"} Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.679513 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-bbc3-account-create-update-cgjwl"] Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.688757 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-bbc3-account-create-update-cgjwl" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.690653 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.703174 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/82100e2b-cadd-4a88-9599-2f0932deacce-operator-scripts\") pod \"nova-cell1-db-create-fw4wg\" (UID: \"82100e2b-cadd-4a88-9599-2f0932deacce\") " pod="openstack/nova-cell1-db-create-fw4wg" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.703242 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2cbbe657-958b-4c43-a636-e04ac880613d-operator-scripts\") pod \"nova-cell0-bbc3-account-create-update-cgjwl\" (UID: \"2cbbe657-958b-4c43-a636-e04ac880613d\") " pod="openstack/nova-cell0-bbc3-account-create-update-cgjwl" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.703270 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f4lzv\" (UniqueName: \"kubernetes.io/projected/2cbbe657-958b-4c43-a636-e04ac880613d-kube-api-access-f4lzv\") pod \"nova-cell0-bbc3-account-create-update-cgjwl\" (UID: \"2cbbe657-958b-4c43-a636-e04ac880613d\") " pod="openstack/nova-cell0-bbc3-account-create-update-cgjwl" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.703313 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2lnmg\" (UniqueName: \"kubernetes.io/projected/2e5c3f09-b080-4738-a696-f210249c18eb-kube-api-access-2lnmg\") pod \"nova-cell0-db-create-snbt9\" (UID: \"2e5c3f09-b080-4738-a696-f210249c18eb\") " pod="openstack/nova-cell0-db-create-snbt9" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.703352 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e5c3f09-b080-4738-a696-f210249c18eb-operator-scripts\") pod \"nova-cell0-db-create-snbt9\" (UID: \"2e5c3f09-b080-4738-a696-f210249c18eb\") " pod="openstack/nova-cell0-db-create-snbt9" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.703371 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6944b18f-146d-47ba-9d71-06d200fa828e-operator-scripts\") pod \"nova-api-2f60-account-create-update-kwdnz\" (UID: \"6944b18f-146d-47ba-9d71-06d200fa828e\") " pod="openstack/nova-api-2f60-account-create-update-kwdnz" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.703430 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2zlw7\" (UniqueName: \"kubernetes.io/projected/6944b18f-146d-47ba-9d71-06d200fa828e-kube-api-access-2zlw7\") pod \"nova-api-2f60-account-create-update-kwdnz\" (UID: \"6944b18f-146d-47ba-9d71-06d200fa828e\") " pod="openstack/nova-api-2f60-account-create-update-kwdnz" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.703454 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pv9w8\" (UniqueName: \"kubernetes.io/projected/82100e2b-cadd-4a88-9599-2f0932deacce-kube-api-access-pv9w8\") pod \"nova-cell1-db-create-fw4wg\" (UID: \"82100e2b-cadd-4a88-9599-2f0932deacce\") " 
pod="openstack/nova-cell1-db-create-fw4wg" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.704253 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/82100e2b-cadd-4a88-9599-2f0932deacce-operator-scripts\") pod \"nova-cell1-db-create-fw4wg\" (UID: \"82100e2b-cadd-4a88-9599-2f0932deacce\") " pod="openstack/nova-cell1-db-create-fw4wg" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.704307 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e5c3f09-b080-4738-a696-f210249c18eb-operator-scripts\") pod \"nova-cell0-db-create-snbt9\" (UID: \"2e5c3f09-b080-4738-a696-f210249c18eb\") " pod="openstack/nova-cell0-db-create-snbt9" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.704356 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6944b18f-146d-47ba-9d71-06d200fa828e-operator-scripts\") pod \"nova-api-2f60-account-create-update-kwdnz\" (UID: \"6944b18f-146d-47ba-9d71-06d200fa828e\") " pod="openstack/nova-api-2f60-account-create-update-kwdnz" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.720732 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-nnnff" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.725982 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-bbc3-account-create-update-cgjwl"] Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.726773 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2lnmg\" (UniqueName: \"kubernetes.io/projected/2e5c3f09-b080-4738-a696-f210249c18eb-kube-api-access-2lnmg\") pod \"nova-cell0-db-create-snbt9\" (UID: \"2e5c3f09-b080-4738-a696-f210249c18eb\") " pod="openstack/nova-cell0-db-create-snbt9" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.738385 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2zlw7\" (UniqueName: \"kubernetes.io/projected/6944b18f-146d-47ba-9d71-06d200fa828e-kube-api-access-2zlw7\") pod \"nova-api-2f60-account-create-update-kwdnz\" (UID: \"6944b18f-146d-47ba-9d71-06d200fa828e\") " pod="openstack/nova-api-2f60-account-create-update-kwdnz" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.748814 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pv9w8\" (UniqueName: \"kubernetes.io/projected/82100e2b-cadd-4a88-9599-2f0932deacce-kube-api-access-pv9w8\") pod \"nova-cell1-db-create-fw4wg\" (UID: \"82100e2b-cadd-4a88-9599-2f0932deacce\") " pod="openstack/nova-cell1-db-create-fw4wg" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.805120 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f4lzv\" (UniqueName: \"kubernetes.io/projected/2cbbe657-958b-4c43-a636-e04ac880613d-kube-api-access-f4lzv\") pod \"nova-cell0-bbc3-account-create-update-cgjwl\" (UID: \"2cbbe657-958b-4c43-a636-e04ac880613d\") " pod="openstack/nova-cell0-bbc3-account-create-update-cgjwl" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.805372 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2cbbe657-958b-4c43-a636-e04ac880613d-operator-scripts\") pod \"nova-cell0-bbc3-account-create-update-cgjwl\" (UID: 
\"2cbbe657-958b-4c43-a636-e04ac880613d\") " pod="openstack/nova-cell0-bbc3-account-create-update-cgjwl" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.805998 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-snbt9" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.806123 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2cbbe657-958b-4c43-a636-e04ac880613d-operator-scripts\") pod \"nova-cell0-bbc3-account-create-update-cgjwl\" (UID: \"2cbbe657-958b-4c43-a636-e04ac880613d\") " pod="openstack/nova-cell0-bbc3-account-create-update-cgjwl" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.824222 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f4lzv\" (UniqueName: \"kubernetes.io/projected/2cbbe657-958b-4c43-a636-e04ac880613d-kube-api-access-f4lzv\") pod \"nova-cell0-bbc3-account-create-update-cgjwl\" (UID: \"2cbbe657-958b-4c43-a636-e04ac880613d\") " pod="openstack/nova-cell0-bbc3-account-create-update-cgjwl" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.839601 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-2f60-account-create-update-kwdnz" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.905247 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-3f61-account-create-update-lt497"] Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.907195 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-3f61-account-create-update-lt497" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.918164 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-fw4wg" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.932672 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Feb 18 00:57:55 crc kubenswrapper[4791]: I0218 00:57:55.972716 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-3f61-account-create-update-lt497"] Feb 18 00:57:56 crc kubenswrapper[4791]: I0218 00:57:56.012537 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-bbc3-account-create-update-cgjwl" Feb 18 00:57:56 crc kubenswrapper[4791]: I0218 00:57:56.016734 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0411e3ff-4c35-4daa-9d29-2f8a46e5ee26-operator-scripts\") pod \"nova-cell1-3f61-account-create-update-lt497\" (UID: \"0411e3ff-4c35-4daa-9d29-2f8a46e5ee26\") " pod="openstack/nova-cell1-3f61-account-create-update-lt497" Feb 18 00:57:56 crc kubenswrapper[4791]: I0218 00:57:56.016932 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7p8s4\" (UniqueName: \"kubernetes.io/projected/0411e3ff-4c35-4daa-9d29-2f8a46e5ee26-kube-api-access-7p8s4\") pod \"nova-cell1-3f61-account-create-update-lt497\" (UID: \"0411e3ff-4c35-4daa-9d29-2f8a46e5ee26\") " pod="openstack/nova-cell1-3f61-account-create-update-lt497" Feb 18 00:57:56 crc kubenswrapper[4791]: I0218 00:57:56.122667 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7p8s4\" (UniqueName: \"kubernetes.io/projected/0411e3ff-4c35-4daa-9d29-2f8a46e5ee26-kube-api-access-7p8s4\") pod \"nova-cell1-3f61-account-create-update-lt497\" (UID: \"0411e3ff-4c35-4daa-9d29-2f8a46e5ee26\") " pod="openstack/nova-cell1-3f61-account-create-update-lt497" Feb 18 00:57:56 crc kubenswrapper[4791]: I0218 00:57:56.123173 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0411e3ff-4c35-4daa-9d29-2f8a46e5ee26-operator-scripts\") pod \"nova-cell1-3f61-account-create-update-lt497\" (UID: \"0411e3ff-4c35-4daa-9d29-2f8a46e5ee26\") " pod="openstack/nova-cell1-3f61-account-create-update-lt497" Feb 18 00:57:56 crc kubenswrapper[4791]: I0218 00:57:56.123888 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0411e3ff-4c35-4daa-9d29-2f8a46e5ee26-operator-scripts\") pod \"nova-cell1-3f61-account-create-update-lt497\" (UID: \"0411e3ff-4c35-4daa-9d29-2f8a46e5ee26\") " pod="openstack/nova-cell1-3f61-account-create-update-lt497" Feb 18 00:57:56 crc kubenswrapper[4791]: I0218 00:57:56.149071 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7p8s4\" (UniqueName: \"kubernetes.io/projected/0411e3ff-4c35-4daa-9d29-2f8a46e5ee26-kube-api-access-7p8s4\") pod \"nova-cell1-3f61-account-create-update-lt497\" (UID: \"0411e3ff-4c35-4daa-9d29-2f8a46e5ee26\") " pod="openstack/nova-cell1-3f61-account-create-update-lt497" Feb 18 00:57:56 crc kubenswrapper[4791]: I0218 00:57:56.242282 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-3f61-account-create-update-lt497" Feb 18 00:57:56 crc kubenswrapper[4791]: I0218 00:57:56.785444 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-8475878fc-kbpdv" Feb 18 00:57:56 crc kubenswrapper[4791]: I0218 00:57:56.800024 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 00:57:56 crc kubenswrapper[4791]: I0218 00:57:56.800101 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 00:57:56 crc kubenswrapper[4791]: I0218 00:57:56.882331 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-8694564946-rt9m5"] Feb 18 00:57:56 crc kubenswrapper[4791]: I0218 00:57:56.882610 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-8694564946-rt9m5" podUID="7c4f2d33-853b-4b26-9050-a95f8a0aacff" containerName="neutron-api" containerID="cri-o://274b8e99cc69b5607a27e5b87eed51926d9c1ac4a2fed3e8f3527894e8ab7f5f" gracePeriod=30 Feb 18 00:57:56 crc kubenswrapper[4791]: I0218 00:57:56.883004 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-8694564946-rt9m5" podUID="7c4f2d33-853b-4b26-9050-a95f8a0aacff" containerName="neutron-httpd" containerID="cri-o://8abe60f3df0b33e090dc83d4165c66f72192c57426bc2db0ec4358908d91390e" gracePeriod=30 Feb 18 00:57:57 crc kubenswrapper[4791]: I0218 00:57:57.667320 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wvm6h" event={"ID":"90ea96e9-ad1e-49ab-b9f9-b60d2142aacd","Type":"ContainerStarted","Data":"466ab734f4d998b0f43a831a6155aefa0bf20874599fa90eda8082b58833f730"} Feb 18 00:57:57 crc kubenswrapper[4791]: I0218 00:57:57.680588 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"e10a79e5-3fc8-492a-9551-ee6cd80c2f83","Type":"ContainerStarted","Data":"21ef5b1a6bdbf2dafa3587b626ff4c2f78fa86d8c97d6a15684e611153d8113c"} Feb 18 00:57:57 crc kubenswrapper[4791]: I0218 00:57:57.692270 4791 generic.go:334] "Generic (PLEG): container finished" podID="7c4f2d33-853b-4b26-9050-a95f8a0aacff" containerID="8abe60f3df0b33e090dc83d4165c66f72192c57426bc2db0ec4358908d91390e" exitCode=0 Feb 18 00:57:57 crc kubenswrapper[4791]: I0218 00:57:57.692324 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8694564946-rt9m5" event={"ID":"7c4f2d33-853b-4b26-9050-a95f8a0aacff","Type":"ContainerDied","Data":"8abe60f3df0b33e090dc83d4165c66f72192c57426bc2db0ec4358908d91390e"} Feb 18 00:57:57 crc kubenswrapper[4791]: I0218 00:57:57.711909 4791 generic.go:334] "Generic (PLEG): container finished" podID="6e095560-3137-46e5-bd5e-f03eee61fdb6" containerID="1d083905565b7442c7ab2e96d29ccc4ab05dc8aa982cac0277293ad347ae5d6d" exitCode=0 Feb 18 00:57:57 crc kubenswrapper[4791]: I0218 00:57:57.711960 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"6e095560-3137-46e5-bd5e-f03eee61fdb6","Type":"ContainerDied","Data":"1d083905565b7442c7ab2e96d29ccc4ab05dc8aa982cac0277293ad347ae5d6d"} Feb 18 00:57:57 crc kubenswrapper[4791]: I0218 00:57:57.771244 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wvm6h" podStartSLOduration=3.081054121 podStartE2EDuration="14.771223411s" podCreationTimestamp="2026-02-18 00:57:43 +0000 UTC" firstStartedPulling="2026-02-18 00:57:45.44600779 +0000 UTC m=+1407.014020960" lastFinishedPulling="2026-02-18 00:57:57.13617708 +0000 UTC m=+1418.704190250" observedRunningTime="2026-02-18 00:57:57.735483233 +0000 UTC m=+1419.303496403" watchObservedRunningTime="2026-02-18 00:57:57.771223411 +0000 UTC m=+1419.339236581" Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.082107 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=3.087189409 podStartE2EDuration="14.082085513s" podCreationTimestamp="2026-02-18 00:57:44 +0000 UTC" firstStartedPulling="2026-02-18 00:57:45.602148999 +0000 UTC m=+1407.170162169" lastFinishedPulling="2026-02-18 00:57:56.597045103 +0000 UTC m=+1418.165058273" observedRunningTime="2026-02-18 00:57:57.766277757 +0000 UTC m=+1419.334290927" watchObservedRunningTime="2026-02-18 00:57:58.082085513 +0000 UTC m=+1419.650098683" Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.099186 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-snbt9"] Feb 18 00:57:58 crc kubenswrapper[4791]: W0218 00:57:58.106863 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf729e438_68f7_48b7_9a93_e54e1da4c045.slice/crio-fb46bb1fe01722d5f081fa38d2e8a68b858b5b1e84773d32503419c1acc29ca5 WatchSource:0}: Error finding container fb46bb1fe01722d5f081fa38d2e8a68b858b5b1e84773d32503419c1acc29ca5: Status 404 returned error can't find the container with id fb46bb1fe01722d5f081fa38d2e8a68b858b5b1e84773d32503419c1acc29ca5 Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.122592 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-5686f7fc6f-xd9xk"] Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.148747 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-2f60-account-create-update-kwdnz"] Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.503454 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-fw4wg"] Feb 18 00:57:58 crc kubenswrapper[4791]: W0218 00:57:58.517640 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod82100e2b_cadd_4a88_9599_2f0932deacce.slice/crio-c6c447bfad1eaec62bb97135cc8703e2cf4f73202f47f0106325d847c46f36ac WatchSource:0}: Error finding container c6c447bfad1eaec62bb97135cc8703e2cf4f73202f47f0106325d847c46f36ac: Status 404 returned error can't find the container with id c6c447bfad1eaec62bb97135cc8703e2cf4f73202f47f0106325d847c46f36ac Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.526659 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-f5d4ccdc7-7kwld"] Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.554540 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-nnnff"] Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.597736 4791 util.go:48] "No ready 
sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.616058 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6e095560-3137-46e5-bd5e-f03eee61fdb6-log-httpd\") pod \"6e095560-3137-46e5-bd5e-f03eee61fdb6\" (UID: \"6e095560-3137-46e5-bd5e-f03eee61fdb6\") " Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.616108 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e095560-3137-46e5-bd5e-f03eee61fdb6-config-data\") pod \"6e095560-3137-46e5-bd5e-f03eee61fdb6\" (UID: \"6e095560-3137-46e5-bd5e-f03eee61fdb6\") " Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.616212 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6e095560-3137-46e5-bd5e-f03eee61fdb6-sg-core-conf-yaml\") pod \"6e095560-3137-46e5-bd5e-f03eee61fdb6\" (UID: \"6e095560-3137-46e5-bd5e-f03eee61fdb6\") " Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.616248 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6e095560-3137-46e5-bd5e-f03eee61fdb6-run-httpd\") pod \"6e095560-3137-46e5-bd5e-f03eee61fdb6\" (UID: \"6e095560-3137-46e5-bd5e-f03eee61fdb6\") " Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.616273 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e095560-3137-46e5-bd5e-f03eee61fdb6-combined-ca-bundle\") pod \"6e095560-3137-46e5-bd5e-f03eee61fdb6\" (UID: \"6e095560-3137-46e5-bd5e-f03eee61fdb6\") " Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.616401 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h466x\" (UniqueName: \"kubernetes.io/projected/6e095560-3137-46e5-bd5e-f03eee61fdb6-kube-api-access-h466x\") pod \"6e095560-3137-46e5-bd5e-f03eee61fdb6\" (UID: \"6e095560-3137-46e5-bd5e-f03eee61fdb6\") " Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.616425 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6e095560-3137-46e5-bd5e-f03eee61fdb6-scripts\") pod \"6e095560-3137-46e5-bd5e-f03eee61fdb6\" (UID: \"6e095560-3137-46e5-bd5e-f03eee61fdb6\") " Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.618652 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6e095560-3137-46e5-bd5e-f03eee61fdb6-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "6e095560-3137-46e5-bd5e-f03eee61fdb6" (UID: "6e095560-3137-46e5-bd5e-f03eee61fdb6"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.623225 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6e095560-3137-46e5-bd5e-f03eee61fdb6-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "6e095560-3137-46e5-bd5e-f03eee61fdb6" (UID: "6e095560-3137-46e5-bd5e-f03eee61fdb6"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.660586 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e095560-3137-46e5-bd5e-f03eee61fdb6-kube-api-access-h466x" (OuterVolumeSpecName: "kube-api-access-h466x") pod "6e095560-3137-46e5-bd5e-f03eee61fdb6" (UID: "6e095560-3137-46e5-bd5e-f03eee61fdb6"). InnerVolumeSpecName "kube-api-access-h466x". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.663189 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e095560-3137-46e5-bd5e-f03eee61fdb6-scripts" (OuterVolumeSpecName: "scripts") pod "6e095560-3137-46e5-bd5e-f03eee61fdb6" (UID: "6e095560-3137-46e5-bd5e-f03eee61fdb6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.663421 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-9skqg"] Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.664665 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e095560-3137-46e5-bd5e-f03eee61fdb6-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "6e095560-3137-46e5-bd5e-f03eee61fdb6" (UID: "6e095560-3137-46e5-bd5e-f03eee61fdb6"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.702573 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-74768d85bc-v29rm"] Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.727072 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-5c6c54c7f5-xvkc2"] Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.733799 4791 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6e095560-3137-46e5-bd5e-f03eee61fdb6-log-httpd\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.733833 4791 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6e095560-3137-46e5-bd5e-f03eee61fdb6-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.733843 4791 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6e095560-3137-46e5-bd5e-f03eee61fdb6-run-httpd\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.733854 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h466x\" (UniqueName: \"kubernetes.io/projected/6e095560-3137-46e5-bd5e-f03eee61fdb6-kube-api-access-h466x\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.733865 4791 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6e095560-3137-46e5-bd5e-f03eee61fdb6-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.775263 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-nnnff" event={"ID":"a90c6dfb-27a9-46ad-9cb8-121d9fb4bee8","Type":"ContainerStarted","Data":"a2f468cb24d3721bd345192a8b66133d1a85d859d91f4f847bfd8b158da1634f"} Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 
00:57:58.776764 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-f5d4ccdc7-7kwld" event={"ID":"e21c26db-c068-4ee6-be23-2539a847b9d8","Type":"ContainerStarted","Data":"5e8eb2cb38e15551526acc464b9b38071df702bdc72abd7e75fa4befd6bda38a"} Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.777918 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-2f60-account-create-update-kwdnz" event={"ID":"6944b18f-146d-47ba-9d71-06d200fa828e","Type":"ContainerStarted","Data":"467d7b493bb118b406df39c7a96d48a7b75d575444ba161dcc63d3ce235e6204"} Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.777942 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-2f60-account-create-update-kwdnz" event={"ID":"6944b18f-146d-47ba-9d71-06d200fa828e","Type":"ContainerStarted","Data":"20567d488aae96bb3577b17aa2b97a71fe86ff9899b9a2aceaa5b5c31d1265d2"} Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.779934 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-fw4wg" event={"ID":"82100e2b-cadd-4a88-9599-2f0932deacce","Type":"ContainerStarted","Data":"c6c447bfad1eaec62bb97135cc8703e2cf4f73202f47f0106325d847c46f36ac"} Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.788399 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-5c6c54c7f5-xvkc2" event={"ID":"f0488233-4091-4798-b96e-194d46245d44","Type":"ContainerStarted","Data":"e0d478347fd19f810c8479b625a3ad09f0f5feecccc7f40e45ad8cfb29c0ae43"} Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.802737 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-5686f7fc6f-xd9xk" event={"ID":"f729e438-68f7-48b7-9a93-e54e1da4c045","Type":"ContainerStarted","Data":"f4c1fcdacff64d1d556094357b41cb186af1fc2cb5b33cc0e2ed8cf86303b2c4"} Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.804293 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-5686f7fc6f-xd9xk" Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.804396 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-5686f7fc6f-xd9xk" event={"ID":"f729e438-68f7-48b7-9a93-e54e1da4c045","Type":"ContainerStarted","Data":"fb46bb1fe01722d5f081fa38d2e8a68b858b5b1e84773d32503419c1acc29ca5"} Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.808773 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-9skqg" event={"ID":"26012547-d6fc-44de-9ad8-413d40acfb88","Type":"ContainerStarted","Data":"f0f96c39cd0023f46e3c1c818deef862f8e35d7176c1f2e77bc560252c96ca9d"} Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.825755 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-2f60-account-create-update-kwdnz" podStartSLOduration=3.825559574 podStartE2EDuration="3.825559574s" podCreationTimestamp="2026-02-18 00:57:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:57:58.797451923 +0000 UTC m=+1420.365465093" watchObservedRunningTime="2026-02-18 00:57:58.825559574 +0000 UTC m=+1420.393572744" Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.840587 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-74768d85bc-v29rm" 
event={"ID":"3546ea0e-00fb-4c3a-a653-54fc9527457b","Type":"ContainerStarted","Data":"b3463f8bf232782e1862c959d4bfbfc0cdc894959eb60545e231b1ad19a4bdec"} Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.880280 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-bbc3-account-create-update-cgjwl"] Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.880509 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-5686f7fc6f-xd9xk" podStartSLOduration=8.880492266 podStartE2EDuration="8.880492266s" podCreationTimestamp="2026-02-18 00:57:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:57:58.828224626 +0000 UTC m=+1420.396237816" watchObservedRunningTime="2026-02-18 00:57:58.880492266 +0000 UTC m=+1420.448505436" Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.903299 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6e095560-3137-46e5-bd5e-f03eee61fdb6","Type":"ContainerDied","Data":"e32538aced1a1f9061c40fb59b36f4ed2dba77af83aa13dccbf36fcfb76bb1cd"} Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.903351 4791 scope.go:117] "RemoveContainer" containerID="676b2f0e14dc541f44cb23a5ea59f2519f4bbfe7d8ec509d3834ad5d0b5e3e82" Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.903569 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.912656 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-3f61-account-create-update-lt497"] Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.920609 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e095560-3137-46e5-bd5e-f03eee61fdb6-config-data" (OuterVolumeSpecName: "config-data") pod "6e095560-3137-46e5-bd5e-f03eee61fdb6" (UID: "6e095560-3137-46e5-bd5e-f03eee61fdb6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.923445 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-snbt9" event={"ID":"2e5c3f09-b080-4738-a696-f210249c18eb","Type":"ContainerStarted","Data":"6f0084dfc8173b974457439e3a895c9a0625785d1d98f5ad74f8aca329c3c2a8"} Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.923485 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-snbt9" event={"ID":"2e5c3f09-b080-4738-a696-f210249c18eb","Type":"ContainerStarted","Data":"39a1758b4448f5a430fd1803109af2324219ac599e4f4fe9d8e276a8cc54a478"} Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.933297 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e095560-3137-46e5-bd5e-f03eee61fdb6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6e095560-3137-46e5-bd5e-f03eee61fdb6" (UID: "6e095560-3137-46e5-bd5e-f03eee61fdb6"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.947599 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-db-create-snbt9" podStartSLOduration=3.9475813950000003 podStartE2EDuration="3.947581395s" podCreationTimestamp="2026-02-18 00:57:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:57:58.946136 +0000 UTC m=+1420.514149170" watchObservedRunningTime="2026-02-18 00:57:58.947581395 +0000 UTC m=+1420.515594565" Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.969366 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e095560-3137-46e5-bd5e-f03eee61fdb6-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:58 crc kubenswrapper[4791]: I0218 00:57:58.970864 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e095560-3137-46e5-bd5e-f03eee61fdb6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.097599 4791 scope.go:117] "RemoveContainer" containerID="216248e56f84af6b908e0abad59dce5fb324f189ff519cc6095e06d70d532305" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.180139 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-7f4b9bdf54-r459n"] Feb 18 00:57:59 crc kubenswrapper[4791]: E0218 00:57:59.180937 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e095560-3137-46e5-bd5e-f03eee61fdb6" containerName="sg-core" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.180951 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e095560-3137-46e5-bd5e-f03eee61fdb6" containerName="sg-core" Feb 18 00:57:59 crc kubenswrapper[4791]: E0218 00:57:59.181051 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e095560-3137-46e5-bd5e-f03eee61fdb6" containerName="proxy-httpd" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.181061 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e095560-3137-46e5-bd5e-f03eee61fdb6" containerName="proxy-httpd" Feb 18 00:57:59 crc kubenswrapper[4791]: E0218 00:57:59.181380 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e095560-3137-46e5-bd5e-f03eee61fdb6" containerName="ceilometer-central-agent" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.181403 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e095560-3137-46e5-bd5e-f03eee61fdb6" containerName="ceilometer-central-agent" Feb 18 00:57:59 crc kubenswrapper[4791]: E0218 00:57:59.181427 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e095560-3137-46e5-bd5e-f03eee61fdb6" containerName="ceilometer-notification-agent" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.181435 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e095560-3137-46e5-bd5e-f03eee61fdb6" containerName="ceilometer-notification-agent" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.182695 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e095560-3137-46e5-bd5e-f03eee61fdb6" containerName="sg-core" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.182730 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e095560-3137-46e5-bd5e-f03eee61fdb6" containerName="ceilometer-central-agent" Feb 18 00:57:59 crc kubenswrapper[4791]: 
I0218 00:57:59.182741 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e095560-3137-46e5-bd5e-f03eee61fdb6" containerName="ceilometer-notification-agent" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.182759 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e095560-3137-46e5-bd5e-f03eee61fdb6" containerName="proxy-httpd" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.190111 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7f4b9bdf54-r459n" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.274372 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-b4df76d98-82cxf"] Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.289656 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-b4df76d98-82cxf" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.296890 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/81e44d40-acc5-4bae-a4dc-b9dd837fffef-config-data-custom\") pod \"heat-cfnapi-7f4b9bdf54-r459n\" (UID: \"81e44d40-acc5-4bae-a4dc-b9dd837fffef\") " pod="openstack/heat-cfnapi-7f4b9bdf54-r459n" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.297009 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81e44d40-acc5-4bae-a4dc-b9dd837fffef-config-data\") pod \"heat-cfnapi-7f4b9bdf54-r459n\" (UID: \"81e44d40-acc5-4bae-a4dc-b9dd837fffef\") " pod="openstack/heat-cfnapi-7f4b9bdf54-r459n" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.297087 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81e44d40-acc5-4bae-a4dc-b9dd837fffef-combined-ca-bundle\") pod \"heat-cfnapi-7f4b9bdf54-r459n\" (UID: \"81e44d40-acc5-4bae-a4dc-b9dd837fffef\") " pod="openstack/heat-cfnapi-7f4b9bdf54-r459n" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.297136 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8ksbb\" (UniqueName: \"kubernetes.io/projected/81e44d40-acc5-4bae-a4dc-b9dd837fffef-kube-api-access-8ksbb\") pod \"heat-cfnapi-7f4b9bdf54-r459n\" (UID: \"81e44d40-acc5-4bae-a4dc-b9dd837fffef\") " pod="openstack/heat-cfnapi-7f4b9bdf54-r459n" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.305280 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-657c5fbcdb-dphwk"] Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.306727 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-657c5fbcdb-dphwk" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.315689 4791 scope.go:117] "RemoveContainer" containerID="1d083905565b7442c7ab2e96d29ccc4ab05dc8aa982cac0277293ad347ae5d6d" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.323949 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-b4df76d98-82cxf"] Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.342483 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-7f4b9bdf54-r459n"] Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.399866 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c31267c4-b30a-478e-b67a-0231013883df-config-data\") pod \"heat-engine-b4df76d98-82cxf\" (UID: \"c31267c4-b30a-478e-b67a-0231013883df\") " pod="openstack/heat-engine-b4df76d98-82cxf" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.400020 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81e44d40-acc5-4bae-a4dc-b9dd837fffef-config-data\") pod \"heat-cfnapi-7f4b9bdf54-r459n\" (UID: \"81e44d40-acc5-4bae-a4dc-b9dd837fffef\") " pod="openstack/heat-cfnapi-7f4b9bdf54-r459n" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.400064 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cea6666e-f991-44f6-bb6f-55cead75043f-combined-ca-bundle\") pod \"heat-api-657c5fbcdb-dphwk\" (UID: \"cea6666e-f991-44f6-bb6f-55cead75043f\") " pod="openstack/heat-api-657c5fbcdb-dphwk" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.400097 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c31267c4-b30a-478e-b67a-0231013883df-config-data-custom\") pod \"heat-engine-b4df76d98-82cxf\" (UID: \"c31267c4-b30a-478e-b67a-0231013883df\") " pod="openstack/heat-engine-b4df76d98-82cxf" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.400116 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-47cs2\" (UniqueName: \"kubernetes.io/projected/c31267c4-b30a-478e-b67a-0231013883df-kube-api-access-47cs2\") pod \"heat-engine-b4df76d98-82cxf\" (UID: \"c31267c4-b30a-478e-b67a-0231013883df\") " pod="openstack/heat-engine-b4df76d98-82cxf" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.400144 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cea6666e-f991-44f6-bb6f-55cead75043f-config-data-custom\") pod \"heat-api-657c5fbcdb-dphwk\" (UID: \"cea6666e-f991-44f6-bb6f-55cead75043f\") " pod="openstack/heat-api-657c5fbcdb-dphwk" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.400195 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cea6666e-f991-44f6-bb6f-55cead75043f-config-data\") pod \"heat-api-657c5fbcdb-dphwk\" (UID: \"cea6666e-f991-44f6-bb6f-55cead75043f\") " pod="openstack/heat-api-657c5fbcdb-dphwk" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.400224 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/81e44d40-acc5-4bae-a4dc-b9dd837fffef-combined-ca-bundle\") pod \"heat-cfnapi-7f4b9bdf54-r459n\" (UID: \"81e44d40-acc5-4bae-a4dc-b9dd837fffef\") " pod="openstack/heat-cfnapi-7f4b9bdf54-r459n" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.400250 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c31267c4-b30a-478e-b67a-0231013883df-combined-ca-bundle\") pod \"heat-engine-b4df76d98-82cxf\" (UID: \"c31267c4-b30a-478e-b67a-0231013883df\") " pod="openstack/heat-engine-b4df76d98-82cxf" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.400290 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8ksbb\" (UniqueName: \"kubernetes.io/projected/81e44d40-acc5-4bae-a4dc-b9dd837fffef-kube-api-access-8ksbb\") pod \"heat-cfnapi-7f4b9bdf54-r459n\" (UID: \"81e44d40-acc5-4bae-a4dc-b9dd837fffef\") " pod="openstack/heat-cfnapi-7f4b9bdf54-r459n" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.400314 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5kntn\" (UniqueName: \"kubernetes.io/projected/cea6666e-f991-44f6-bb6f-55cead75043f-kube-api-access-5kntn\") pod \"heat-api-657c5fbcdb-dphwk\" (UID: \"cea6666e-f991-44f6-bb6f-55cead75043f\") " pod="openstack/heat-api-657c5fbcdb-dphwk" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.400359 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/81e44d40-acc5-4bae-a4dc-b9dd837fffef-config-data-custom\") pod \"heat-cfnapi-7f4b9bdf54-r459n\" (UID: \"81e44d40-acc5-4bae-a4dc-b9dd837fffef\") " pod="openstack/heat-cfnapi-7f4b9bdf54-r459n" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.403627 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-657c5fbcdb-dphwk"] Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.408425 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81e44d40-acc5-4bae-a4dc-b9dd837fffef-config-data\") pod \"heat-cfnapi-7f4b9bdf54-r459n\" (UID: \"81e44d40-acc5-4bae-a4dc-b9dd837fffef\") " pod="openstack/heat-cfnapi-7f4b9bdf54-r459n" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.408433 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81e44d40-acc5-4bae-a4dc-b9dd837fffef-combined-ca-bundle\") pod \"heat-cfnapi-7f4b9bdf54-r459n\" (UID: \"81e44d40-acc5-4bae-a4dc-b9dd837fffef\") " pod="openstack/heat-cfnapi-7f4b9bdf54-r459n" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.412298 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/81e44d40-acc5-4bae-a4dc-b9dd837fffef-config-data-custom\") pod \"heat-cfnapi-7f4b9bdf54-r459n\" (UID: \"81e44d40-acc5-4bae-a4dc-b9dd837fffef\") " pod="openstack/heat-cfnapi-7f4b9bdf54-r459n" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.434832 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8ksbb\" (UniqueName: \"kubernetes.io/projected/81e44d40-acc5-4bae-a4dc-b9dd837fffef-kube-api-access-8ksbb\") pod \"heat-cfnapi-7f4b9bdf54-r459n\" (UID: \"81e44d40-acc5-4bae-a4dc-b9dd837fffef\") " pod="openstack/heat-cfnapi-7f4b9bdf54-r459n" Feb 
18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.435334 4791 scope.go:117] "RemoveContainer" containerID="4465be71ab647b33970d4a204efcf7a436c78286539bc074a504e1389aa3d92f" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.502784 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5kntn\" (UniqueName: \"kubernetes.io/projected/cea6666e-f991-44f6-bb6f-55cead75043f-kube-api-access-5kntn\") pod \"heat-api-657c5fbcdb-dphwk\" (UID: \"cea6666e-f991-44f6-bb6f-55cead75043f\") " pod="openstack/heat-api-657c5fbcdb-dphwk" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.502930 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c31267c4-b30a-478e-b67a-0231013883df-config-data\") pod \"heat-engine-b4df76d98-82cxf\" (UID: \"c31267c4-b30a-478e-b67a-0231013883df\") " pod="openstack/heat-engine-b4df76d98-82cxf" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.502972 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cea6666e-f991-44f6-bb6f-55cead75043f-combined-ca-bundle\") pod \"heat-api-657c5fbcdb-dphwk\" (UID: \"cea6666e-f991-44f6-bb6f-55cead75043f\") " pod="openstack/heat-api-657c5fbcdb-dphwk" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.503771 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c31267c4-b30a-478e-b67a-0231013883df-config-data-custom\") pod \"heat-engine-b4df76d98-82cxf\" (UID: \"c31267c4-b30a-478e-b67a-0231013883df\") " pod="openstack/heat-engine-b4df76d98-82cxf" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.503808 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-47cs2\" (UniqueName: \"kubernetes.io/projected/c31267c4-b30a-478e-b67a-0231013883df-kube-api-access-47cs2\") pod \"heat-engine-b4df76d98-82cxf\" (UID: \"c31267c4-b30a-478e-b67a-0231013883df\") " pod="openstack/heat-engine-b4df76d98-82cxf" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.503844 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cea6666e-f991-44f6-bb6f-55cead75043f-config-data-custom\") pod \"heat-api-657c5fbcdb-dphwk\" (UID: \"cea6666e-f991-44f6-bb6f-55cead75043f\") " pod="openstack/heat-api-657c5fbcdb-dphwk" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.504180 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cea6666e-f991-44f6-bb6f-55cead75043f-config-data\") pod \"heat-api-657c5fbcdb-dphwk\" (UID: \"cea6666e-f991-44f6-bb6f-55cead75043f\") " pod="openstack/heat-api-657c5fbcdb-dphwk" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.504231 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c31267c4-b30a-478e-b67a-0231013883df-combined-ca-bundle\") pod \"heat-engine-b4df76d98-82cxf\" (UID: \"c31267c4-b30a-478e-b67a-0231013883df\") " pod="openstack/heat-engine-b4df76d98-82cxf" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.507513 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c31267c4-b30a-478e-b67a-0231013883df-config-data\") pod 
\"heat-engine-b4df76d98-82cxf\" (UID: \"c31267c4-b30a-478e-b67a-0231013883df\") " pod="openstack/heat-engine-b4df76d98-82cxf" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.508233 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c31267c4-b30a-478e-b67a-0231013883df-config-data-custom\") pod \"heat-engine-b4df76d98-82cxf\" (UID: \"c31267c4-b30a-478e-b67a-0231013883df\") " pod="openstack/heat-engine-b4df76d98-82cxf" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.509125 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cea6666e-f991-44f6-bb6f-55cead75043f-combined-ca-bundle\") pod \"heat-api-657c5fbcdb-dphwk\" (UID: \"cea6666e-f991-44f6-bb6f-55cead75043f\") " pod="openstack/heat-api-657c5fbcdb-dphwk" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.512482 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cea6666e-f991-44f6-bb6f-55cead75043f-config-data\") pod \"heat-api-657c5fbcdb-dphwk\" (UID: \"cea6666e-f991-44f6-bb6f-55cead75043f\") " pod="openstack/heat-api-657c5fbcdb-dphwk" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.513273 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c31267c4-b30a-478e-b67a-0231013883df-combined-ca-bundle\") pod \"heat-engine-b4df76d98-82cxf\" (UID: \"c31267c4-b30a-478e-b67a-0231013883df\") " pod="openstack/heat-engine-b4df76d98-82cxf" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.521482 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-47cs2\" (UniqueName: \"kubernetes.io/projected/c31267c4-b30a-478e-b67a-0231013883df-kube-api-access-47cs2\") pod \"heat-engine-b4df76d98-82cxf\" (UID: \"c31267c4-b30a-478e-b67a-0231013883df\") " pod="openstack/heat-engine-b4df76d98-82cxf" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.522500 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5kntn\" (UniqueName: \"kubernetes.io/projected/cea6666e-f991-44f6-bb6f-55cead75043f-kube-api-access-5kntn\") pod \"heat-api-657c5fbcdb-dphwk\" (UID: \"cea6666e-f991-44f6-bb6f-55cead75043f\") " pod="openstack/heat-api-657c5fbcdb-dphwk" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.532962 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cea6666e-f991-44f6-bb6f-55cead75043f-config-data-custom\") pod \"heat-api-657c5fbcdb-dphwk\" (UID: \"cea6666e-f991-44f6-bb6f-55cead75043f\") " pod="openstack/heat-api-657c5fbcdb-dphwk" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.545684 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7f4b9bdf54-r459n" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.601943 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-b4df76d98-82cxf" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.620628 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.640718 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.661639 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.664715 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.666587 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.667011 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.670303 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.732753 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6fd9dd05-384e-406a-b195-af283d2807ad-log-httpd\") pod \"ceilometer-0\" (UID: \"6fd9dd05-384e-406a-b195-af283d2807ad\") " pod="openstack/ceilometer-0" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.732792 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6fd9dd05-384e-406a-b195-af283d2807ad-run-httpd\") pod \"ceilometer-0\" (UID: \"6fd9dd05-384e-406a-b195-af283d2807ad\") " pod="openstack/ceilometer-0" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.732811 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6fd9dd05-384e-406a-b195-af283d2807ad-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6fd9dd05-384e-406a-b195-af283d2807ad\") " pod="openstack/ceilometer-0" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.732950 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6fd9dd05-384e-406a-b195-af283d2807ad-config-data\") pod \"ceilometer-0\" (UID: \"6fd9dd05-384e-406a-b195-af283d2807ad\") " pod="openstack/ceilometer-0" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.733267 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fd9dd05-384e-406a-b195-af283d2807ad-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6fd9dd05-384e-406a-b195-af283d2807ad\") " pod="openstack/ceilometer-0" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.733549 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6fd9dd05-384e-406a-b195-af283d2807ad-scripts\") pod \"ceilometer-0\" (UID: \"6fd9dd05-384e-406a-b195-af283d2807ad\") " pod="openstack/ceilometer-0" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.733598 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-5tg7n\" (UniqueName: \"kubernetes.io/projected/6fd9dd05-384e-406a-b195-af283d2807ad-kube-api-access-5tg7n\") pod \"ceilometer-0\" (UID: \"6fd9dd05-384e-406a-b195-af283d2807ad\") " pod="openstack/ceilometer-0" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.839419 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fd9dd05-384e-406a-b195-af283d2807ad-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6fd9dd05-384e-406a-b195-af283d2807ad\") " pod="openstack/ceilometer-0" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.839967 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6fd9dd05-384e-406a-b195-af283d2807ad-scripts\") pod \"ceilometer-0\" (UID: \"6fd9dd05-384e-406a-b195-af283d2807ad\") " pod="openstack/ceilometer-0" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.840013 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5tg7n\" (UniqueName: \"kubernetes.io/projected/6fd9dd05-384e-406a-b195-af283d2807ad-kube-api-access-5tg7n\") pod \"ceilometer-0\" (UID: \"6fd9dd05-384e-406a-b195-af283d2807ad\") " pod="openstack/ceilometer-0" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.840195 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6fd9dd05-384e-406a-b195-af283d2807ad-log-httpd\") pod \"ceilometer-0\" (UID: \"6fd9dd05-384e-406a-b195-af283d2807ad\") " pod="openstack/ceilometer-0" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.840239 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6fd9dd05-384e-406a-b195-af283d2807ad-run-httpd\") pod \"ceilometer-0\" (UID: \"6fd9dd05-384e-406a-b195-af283d2807ad\") " pod="openstack/ceilometer-0" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.840254 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6fd9dd05-384e-406a-b195-af283d2807ad-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6fd9dd05-384e-406a-b195-af283d2807ad\") " pod="openstack/ceilometer-0" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.840329 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6fd9dd05-384e-406a-b195-af283d2807ad-config-data\") pod \"ceilometer-0\" (UID: \"6fd9dd05-384e-406a-b195-af283d2807ad\") " pod="openstack/ceilometer-0" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.842885 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6fd9dd05-384e-406a-b195-af283d2807ad-log-httpd\") pod \"ceilometer-0\" (UID: \"6fd9dd05-384e-406a-b195-af283d2807ad\") " pod="openstack/ceilometer-0" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.843732 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6fd9dd05-384e-406a-b195-af283d2807ad-run-httpd\") pod \"ceilometer-0\" (UID: \"6fd9dd05-384e-406a-b195-af283d2807ad\") " pod="openstack/ceilometer-0" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.848339 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/6fd9dd05-384e-406a-b195-af283d2807ad-scripts\") pod \"ceilometer-0\" (UID: \"6fd9dd05-384e-406a-b195-af283d2807ad\") " pod="openstack/ceilometer-0" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.849782 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fd9dd05-384e-406a-b195-af283d2807ad-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6fd9dd05-384e-406a-b195-af283d2807ad\") " pod="openstack/ceilometer-0" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.853448 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6fd9dd05-384e-406a-b195-af283d2807ad-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6fd9dd05-384e-406a-b195-af283d2807ad\") " pod="openstack/ceilometer-0" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.872216 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5tg7n\" (UniqueName: \"kubernetes.io/projected/6fd9dd05-384e-406a-b195-af283d2807ad-kube-api-access-5tg7n\") pod \"ceilometer-0\" (UID: \"6fd9dd05-384e-406a-b195-af283d2807ad\") " pod="openstack/ceilometer-0" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.873185 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6fd9dd05-384e-406a-b195-af283d2807ad-config-data\") pod \"ceilometer-0\" (UID: \"6fd9dd05-384e-406a-b195-af283d2807ad\") " pod="openstack/ceilometer-0" Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.987098 4791 generic.go:334] "Generic (PLEG): container finished" podID="2e5c3f09-b080-4738-a696-f210249c18eb" containerID="6f0084dfc8173b974457439e3a895c9a0625785d1d98f5ad74f8aca329c3c2a8" exitCode=0 Feb 18 00:57:59 crc kubenswrapper[4791]: I0218 00:57:59.987172 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-snbt9" event={"ID":"2e5c3f09-b080-4738-a696-f210249c18eb","Type":"ContainerDied","Data":"6f0084dfc8173b974457439e3a895c9a0625785d1d98f5ad74f8aca329c3c2a8"} Feb 18 00:58:00 crc kubenswrapper[4791]: I0218 00:58:00.005241 4791 generic.go:334] "Generic (PLEG): container finished" podID="6944b18f-146d-47ba-9d71-06d200fa828e" containerID="467d7b493bb118b406df39c7a96d48a7b75d575444ba161dcc63d3ce235e6204" exitCode=0 Feb 18 00:58:00 crc kubenswrapper[4791]: I0218 00:58:00.005397 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-2f60-account-create-update-kwdnz" event={"ID":"6944b18f-146d-47ba-9d71-06d200fa828e","Type":"ContainerDied","Data":"467d7b493bb118b406df39c7a96d48a7b75d575444ba161dcc63d3ce235e6204"} Feb 18 00:58:00 crc kubenswrapper[4791]: I0218 00:58:00.035281 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-bbc3-account-create-update-cgjwl" event={"ID":"2cbbe657-958b-4c43-a636-e04ac880613d","Type":"ContainerStarted","Data":"a897ac0c8a9a40988ec4490190b13115319b37cb8d5f74f09d618b172598cb10"} Feb 18 00:58:00 crc kubenswrapper[4791]: I0218 00:58:00.035326 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-bbc3-account-create-update-cgjwl" event={"ID":"2cbbe657-958b-4c43-a636-e04ac880613d","Type":"ContainerStarted","Data":"f6a83ab234ae1c066787eb8639439afabb0a47209093414c8d45c163a570ccdc"} Feb 18 00:58:00 crc kubenswrapper[4791]: I0218 00:58:00.041760 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-5c6c54c7f5-xvkc2" 
event={"ID":"f0488233-4091-4798-b96e-194d46245d44","Type":"ContainerStarted","Data":"55fe4dc4ade19b09c17e45b1080cf0196a934239b44be60a777bf0db543f6c05"} Feb 18 00:58:00 crc kubenswrapper[4791]: I0218 00:58:00.052778 4791 generic.go:334] "Generic (PLEG): container finished" podID="a90c6dfb-27a9-46ad-9cb8-121d9fb4bee8" containerID="57eea9c360d9bf5b07bb70c8dd62acf35b6c0cad6ca783b954a64ff6ab667bd0" exitCode=0 Feb 18 00:58:00 crc kubenswrapper[4791]: I0218 00:58:00.052889 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-nnnff" event={"ID":"a90c6dfb-27a9-46ad-9cb8-121d9fb4bee8","Type":"ContainerDied","Data":"57eea9c360d9bf5b07bb70c8dd62acf35b6c0cad6ca783b954a64ff6ab667bd0"} Feb 18 00:58:00 crc kubenswrapper[4791]: I0218 00:58:00.055803 4791 generic.go:334] "Generic (PLEG): container finished" podID="26012547-d6fc-44de-9ad8-413d40acfb88" containerID="921841858db002c0a71f098c0f7f4cbc7bdba9e7203e36a601069520aefea89e" exitCode=0 Feb 18 00:58:00 crc kubenswrapper[4791]: I0218 00:58:00.055909 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-9skqg" event={"ID":"26012547-d6fc-44de-9ad8-413d40acfb88","Type":"ContainerDied","Data":"921841858db002c0a71f098c0f7f4cbc7bdba9e7203e36a601069520aefea89e"} Feb 18 00:58:00 crc kubenswrapper[4791]: I0218 00:58:00.064087 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-bbc3-account-create-update-cgjwl" podStartSLOduration=5.064064954 podStartE2EDuration="5.064064954s" podCreationTimestamp="2026-02-18 00:57:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:58:00.047447169 +0000 UTC m=+1421.615460329" watchObservedRunningTime="2026-02-18 00:58:00.064064954 +0000 UTC m=+1421.632078124" Feb 18 00:58:00 crc kubenswrapper[4791]: I0218 00:58:00.068189 4791 generic.go:334] "Generic (PLEG): container finished" podID="82100e2b-cadd-4a88-9599-2f0932deacce" containerID="d78c24646b65a67723b9ef8aaf19614c61665cd2fbc6736a8254ed38486948fa" exitCode=0 Feb 18 00:58:00 crc kubenswrapper[4791]: I0218 00:58:00.068254 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-fw4wg" event={"ID":"82100e2b-cadd-4a88-9599-2f0932deacce","Type":"ContainerDied","Data":"d78c24646b65a67723b9ef8aaf19614c61665cd2fbc6736a8254ed38486948fa"} Feb 18 00:58:00 crc kubenswrapper[4791]: I0218 00:58:00.077357 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-3f61-account-create-update-lt497" event={"ID":"0411e3ff-4c35-4daa-9d29-2f8a46e5ee26","Type":"ContainerStarted","Data":"9415a5356d9543f305cfdc305e9ba5d3b9609ec86092e334636a914b51685726"} Feb 18 00:58:00 crc kubenswrapper[4791]: I0218 00:58:00.077398 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-3f61-account-create-update-lt497" event={"ID":"0411e3ff-4c35-4daa-9d29-2f8a46e5ee26","Type":"ContainerStarted","Data":"47b789226fcbca64343a4c20ada94a197539a477a79f2f0111911ad3aedc5bb0"} Feb 18 00:58:00 crc kubenswrapper[4791]: I0218 00:58:00.146633 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-3f61-account-create-update-lt497" podStartSLOduration=5.146612182 podStartE2EDuration="5.146612182s" podCreationTimestamp="2026-02-18 00:57:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 
00:58:00.134397273 +0000 UTC m=+1421.702410443" watchObservedRunningTime="2026-02-18 00:58:00.146612182 +0000 UTC m=+1421.714625352" Feb 18 00:58:00 crc kubenswrapper[4791]: I0218 00:58:00.281375 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 18 00:58:00 crc kubenswrapper[4791]: I0218 00:58:00.285696 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-657c5fbcdb-dphwk" Feb 18 00:58:00 crc kubenswrapper[4791]: I0218 00:58:00.365104 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-b4df76d98-82cxf"] Feb 18 00:58:00 crc kubenswrapper[4791]: I0218 00:58:00.533076 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-7f4b9bdf54-r459n"] Feb 18 00:58:00 crc kubenswrapper[4791]: I0218 00:58:00.886400 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:58:01 crc kubenswrapper[4791]: I0218 00:58:01.019596 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-657c5fbcdb-dphwk"] Feb 18 00:58:01 crc kubenswrapper[4791]: I0218 00:58:01.101736 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6e095560-3137-46e5-bd5e-f03eee61fdb6" path="/var/lib/kubelet/pods/6e095560-3137-46e5-bd5e-f03eee61fdb6/volumes" Feb 18 00:58:01 crc kubenswrapper[4791]: I0218 00:58:01.114261 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-657c5fbcdb-dphwk" event={"ID":"cea6666e-f991-44f6-bb6f-55cead75043f","Type":"ContainerStarted","Data":"50cacdccab64a591e41c700fd5268d99d5bd48d1982c41e15f529b00d00d21f2"} Feb 18 00:58:01 crc kubenswrapper[4791]: I0218 00:58:01.118987 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7f4b9bdf54-r459n" event={"ID":"81e44d40-acc5-4bae-a4dc-b9dd837fffef","Type":"ContainerStarted","Data":"0b2a88d3de4af50d90002f20a75565e63d056349935ba190c0efc3ade820183b"} Feb 18 00:58:01 crc kubenswrapper[4791]: I0218 00:58:01.127463 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-b4df76d98-82cxf" event={"ID":"c31267c4-b30a-478e-b67a-0231013883df","Type":"ContainerStarted","Data":"d4d59551a5b6aa079a14d542a1e92422198d44ebd93bdfb6dcfdc5a484c642b9"} Feb 18 00:58:01 crc kubenswrapper[4791]: I0218 00:58:01.127548 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-b4df76d98-82cxf" event={"ID":"c31267c4-b30a-478e-b67a-0231013883df","Type":"ContainerStarted","Data":"134aa90df1c293d6632fb2b943b4a1936bd540b01e474574452f8577716fef9e"} Feb 18 00:58:01 crc kubenswrapper[4791]: I0218 00:58:01.128399 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-b4df76d98-82cxf" Feb 18 00:58:01 crc kubenswrapper[4791]: I0218 00:58:01.132777 4791 generic.go:334] "Generic (PLEG): container finished" podID="0411e3ff-4c35-4daa-9d29-2f8a46e5ee26" containerID="9415a5356d9543f305cfdc305e9ba5d3b9609ec86092e334636a914b51685726" exitCode=0 Feb 18 00:58:01 crc kubenswrapper[4791]: I0218 00:58:01.132833 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-3f61-account-create-update-lt497" event={"ID":"0411e3ff-4c35-4daa-9d29-2f8a46e5ee26","Type":"ContainerDied","Data":"9415a5356d9543f305cfdc305e9ba5d3b9609ec86092e334636a914b51685726"} Feb 18 00:58:01 crc kubenswrapper[4791]: I0218 00:58:01.152179 4791 generic.go:334] "Generic (PLEG): container finished" podID="2cbbe657-958b-4c43-a636-e04ac880613d" 
containerID="a897ac0c8a9a40988ec4490190b13115319b37cb8d5f74f09d618b172598cb10" exitCode=0 Feb 18 00:58:01 crc kubenswrapper[4791]: I0218 00:58:01.157393 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-bbc3-account-create-update-cgjwl" event={"ID":"2cbbe657-958b-4c43-a636-e04ac880613d","Type":"ContainerDied","Data":"a897ac0c8a9a40988ec4490190b13115319b37cb8d5f74f09d618b172598cb10"} Feb 18 00:58:01 crc kubenswrapper[4791]: I0218 00:58:01.164083 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-b4df76d98-82cxf" podStartSLOduration=2.164059143 podStartE2EDuration="2.164059143s" podCreationTimestamp="2026-02-18 00:57:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:58:01.148239142 +0000 UTC m=+1422.716252312" watchObservedRunningTime="2026-02-18 00:58:01.164059143 +0000 UTC m=+1422.732072313" Feb 18 00:58:01 crc kubenswrapper[4791]: I0218 00:58:01.183282 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-5c6c54c7f5-xvkc2" event={"ID":"f0488233-4091-4798-b96e-194d46245d44","Type":"ContainerStarted","Data":"b1efdfa0b590b272b87e06cea585152becc7d4f70ea5ce74646be05e85181fc6"} Feb 18 00:58:01 crc kubenswrapper[4791]: I0218 00:58:01.185036 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6fd9dd05-384e-406a-b195-af283d2807ad","Type":"ContainerStarted","Data":"5da4b8347d7a7e0943e4134961ec84b0da35a8fa5734fe6285c26a4e0efe9b94"} Feb 18 00:58:01 crc kubenswrapper[4791]: I0218 00:58:01.185134 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-5c6c54c7f5-xvkc2" Feb 18 00:58:01 crc kubenswrapper[4791]: I0218 00:58:01.185182 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-5c6c54c7f5-xvkc2" Feb 18 00:58:01 crc kubenswrapper[4791]: I0218 00:58:01.201561 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-9skqg" event={"ID":"26012547-d6fc-44de-9ad8-413d40acfb88","Type":"ContainerStarted","Data":"b2d575b2f092ad1eebb2ee48db390d5784d8f14a4ffdc8c3defc64bb910245e7"} Feb 18 00:58:01 crc kubenswrapper[4791]: I0218 00:58:01.201826 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-f6bc4c6c9-9skqg" Feb 18 00:58:01 crc kubenswrapper[4791]: I0218 00:58:01.285591 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-5c6c54c7f5-xvkc2" podStartSLOduration=10.285570068 podStartE2EDuration="10.285570068s" podCreationTimestamp="2026-02-18 00:57:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:58:01.222498133 +0000 UTC m=+1422.790511313" watchObservedRunningTime="2026-02-18 00:58:01.285570068 +0000 UTC m=+1422.853583238" Feb 18 00:58:01 crc kubenswrapper[4791]: I0218 00:58:01.303599 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-f6bc4c6c9-9skqg" podStartSLOduration=11.303581486 podStartE2EDuration="11.303581486s" podCreationTimestamp="2026-02-18 00:57:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:58:01.240673746 +0000 UTC m=+1422.808686916" watchObservedRunningTime="2026-02-18 
00:58:01.303581486 +0000 UTC m=+1422.871594656" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.053825 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-74768d85bc-v29rm"] Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.082443 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-f5d4ccdc7-7kwld"] Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.107286 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-7d9d7f9648-qr5lg"] Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.122546 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-7d9d7f9648-qr5lg" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.132078 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-internal-svc" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.132680 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-public-svc" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.154368 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-7b5c5fc9b-vzf74"] Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.156274 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7b5c5fc9b-vzf74" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.163266 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-internal-svc" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.165512 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-public-svc" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.175071 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-7d9d7f9648-qr5lg"] Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.186008 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-7b5c5fc9b-vzf74"] Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.226332 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6fd9dd05-384e-406a-b195-af283d2807ad","Type":"ContainerStarted","Data":"a20e7c0385c4910a664b018fd457ccf6d55a99bbc6835bee15b294024c8dcbdb"} Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.233442 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a103b173-b84f-4c1d-bf8f-bf278b570051-config-data\") pod \"heat-cfnapi-7b5c5fc9b-vzf74\" (UID: \"a103b173-b84f-4c1d-bf8f-bf278b570051\") " pod="openstack/heat-cfnapi-7b5c5fc9b-vzf74" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.233657 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a103b173-b84f-4c1d-bf8f-bf278b570051-internal-tls-certs\") pod \"heat-cfnapi-7b5c5fc9b-vzf74\" (UID: \"a103b173-b84f-4c1d-bf8f-bf278b570051\") " pod="openstack/heat-cfnapi-7b5c5fc9b-vzf74" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.233826 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/50df18fd-8515-4e5b-a699-98930a83e9a7-config-data-custom\") pod \"heat-api-7d9d7f9648-qr5lg\" (UID: \"50df18fd-8515-4e5b-a699-98930a83e9a7\") " 
pod="openstack/heat-api-7d9d7f9648-qr5lg" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.233959 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n6nnn\" (UniqueName: \"kubernetes.io/projected/50df18fd-8515-4e5b-a699-98930a83e9a7-kube-api-access-n6nnn\") pod \"heat-api-7d9d7f9648-qr5lg\" (UID: \"50df18fd-8515-4e5b-a699-98930a83e9a7\") " pod="openstack/heat-api-7d9d7f9648-qr5lg" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.234082 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a103b173-b84f-4c1d-bf8f-bf278b570051-public-tls-certs\") pod \"heat-cfnapi-7b5c5fc9b-vzf74\" (UID: \"a103b173-b84f-4c1d-bf8f-bf278b570051\") " pod="openstack/heat-cfnapi-7b5c5fc9b-vzf74" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.235623 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50df18fd-8515-4e5b-a699-98930a83e9a7-config-data\") pod \"heat-api-7d9d7f9648-qr5lg\" (UID: \"50df18fd-8515-4e5b-a699-98930a83e9a7\") " pod="openstack/heat-api-7d9d7f9648-qr5lg" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.235877 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zb964\" (UniqueName: \"kubernetes.io/projected/a103b173-b84f-4c1d-bf8f-bf278b570051-kube-api-access-zb964\") pod \"heat-cfnapi-7b5c5fc9b-vzf74\" (UID: \"a103b173-b84f-4c1d-bf8f-bf278b570051\") " pod="openstack/heat-cfnapi-7b5c5fc9b-vzf74" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.235988 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/50df18fd-8515-4e5b-a699-98930a83e9a7-internal-tls-certs\") pod \"heat-api-7d9d7f9648-qr5lg\" (UID: \"50df18fd-8515-4e5b-a699-98930a83e9a7\") " pod="openstack/heat-api-7d9d7f9648-qr5lg" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.236135 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a103b173-b84f-4c1d-bf8f-bf278b570051-config-data-custom\") pod \"heat-cfnapi-7b5c5fc9b-vzf74\" (UID: \"a103b173-b84f-4c1d-bf8f-bf278b570051\") " pod="openstack/heat-cfnapi-7b5c5fc9b-vzf74" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.236300 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/50df18fd-8515-4e5b-a699-98930a83e9a7-public-tls-certs\") pod \"heat-api-7d9d7f9648-qr5lg\" (UID: \"50df18fd-8515-4e5b-a699-98930a83e9a7\") " pod="openstack/heat-api-7d9d7f9648-qr5lg" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.236429 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50df18fd-8515-4e5b-a699-98930a83e9a7-combined-ca-bundle\") pod \"heat-api-7d9d7f9648-qr5lg\" (UID: \"50df18fd-8515-4e5b-a699-98930a83e9a7\") " pod="openstack/heat-api-7d9d7f9648-qr5lg" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.236647 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/a103b173-b84f-4c1d-bf8f-bf278b570051-combined-ca-bundle\") pod \"heat-cfnapi-7b5c5fc9b-vzf74\" (UID: \"a103b173-b84f-4c1d-bf8f-bf278b570051\") " pod="openstack/heat-cfnapi-7b5c5fc9b-vzf74" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.339077 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zb964\" (UniqueName: \"kubernetes.io/projected/a103b173-b84f-4c1d-bf8f-bf278b570051-kube-api-access-zb964\") pod \"heat-cfnapi-7b5c5fc9b-vzf74\" (UID: \"a103b173-b84f-4c1d-bf8f-bf278b570051\") " pod="openstack/heat-cfnapi-7b5c5fc9b-vzf74" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.339148 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/50df18fd-8515-4e5b-a699-98930a83e9a7-internal-tls-certs\") pod \"heat-api-7d9d7f9648-qr5lg\" (UID: \"50df18fd-8515-4e5b-a699-98930a83e9a7\") " pod="openstack/heat-api-7d9d7f9648-qr5lg" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.339259 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a103b173-b84f-4c1d-bf8f-bf278b570051-config-data-custom\") pod \"heat-cfnapi-7b5c5fc9b-vzf74\" (UID: \"a103b173-b84f-4c1d-bf8f-bf278b570051\") " pod="openstack/heat-cfnapi-7b5c5fc9b-vzf74" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.339344 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/50df18fd-8515-4e5b-a699-98930a83e9a7-public-tls-certs\") pod \"heat-api-7d9d7f9648-qr5lg\" (UID: \"50df18fd-8515-4e5b-a699-98930a83e9a7\") " pod="openstack/heat-api-7d9d7f9648-qr5lg" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.339407 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50df18fd-8515-4e5b-a699-98930a83e9a7-combined-ca-bundle\") pod \"heat-api-7d9d7f9648-qr5lg\" (UID: \"50df18fd-8515-4e5b-a699-98930a83e9a7\") " pod="openstack/heat-api-7d9d7f9648-qr5lg" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.339546 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a103b173-b84f-4c1d-bf8f-bf278b570051-combined-ca-bundle\") pod \"heat-cfnapi-7b5c5fc9b-vzf74\" (UID: \"a103b173-b84f-4c1d-bf8f-bf278b570051\") " pod="openstack/heat-cfnapi-7b5c5fc9b-vzf74" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.339580 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a103b173-b84f-4c1d-bf8f-bf278b570051-config-data\") pod \"heat-cfnapi-7b5c5fc9b-vzf74\" (UID: \"a103b173-b84f-4c1d-bf8f-bf278b570051\") " pod="openstack/heat-cfnapi-7b5c5fc9b-vzf74" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.339662 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a103b173-b84f-4c1d-bf8f-bf278b570051-internal-tls-certs\") pod \"heat-cfnapi-7b5c5fc9b-vzf74\" (UID: \"a103b173-b84f-4c1d-bf8f-bf278b570051\") " pod="openstack/heat-cfnapi-7b5c5fc9b-vzf74" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.339702 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/50df18fd-8515-4e5b-a699-98930a83e9a7-config-data-custom\") pod \"heat-api-7d9d7f9648-qr5lg\" (UID: \"50df18fd-8515-4e5b-a699-98930a83e9a7\") " pod="openstack/heat-api-7d9d7f9648-qr5lg" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.339725 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n6nnn\" (UniqueName: \"kubernetes.io/projected/50df18fd-8515-4e5b-a699-98930a83e9a7-kube-api-access-n6nnn\") pod \"heat-api-7d9d7f9648-qr5lg\" (UID: \"50df18fd-8515-4e5b-a699-98930a83e9a7\") " pod="openstack/heat-api-7d9d7f9648-qr5lg" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.339750 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a103b173-b84f-4c1d-bf8f-bf278b570051-public-tls-certs\") pod \"heat-cfnapi-7b5c5fc9b-vzf74\" (UID: \"a103b173-b84f-4c1d-bf8f-bf278b570051\") " pod="openstack/heat-cfnapi-7b5c5fc9b-vzf74" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.339885 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50df18fd-8515-4e5b-a699-98930a83e9a7-config-data\") pod \"heat-api-7d9d7f9648-qr5lg\" (UID: \"50df18fd-8515-4e5b-a699-98930a83e9a7\") " pod="openstack/heat-api-7d9d7f9648-qr5lg" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.359310 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a103b173-b84f-4c1d-bf8f-bf278b570051-config-data\") pod \"heat-cfnapi-7b5c5fc9b-vzf74\" (UID: \"a103b173-b84f-4c1d-bf8f-bf278b570051\") " pod="openstack/heat-cfnapi-7b5c5fc9b-vzf74" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.366221 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a103b173-b84f-4c1d-bf8f-bf278b570051-internal-tls-certs\") pod \"heat-cfnapi-7b5c5fc9b-vzf74\" (UID: \"a103b173-b84f-4c1d-bf8f-bf278b570051\") " pod="openstack/heat-cfnapi-7b5c5fc9b-vzf74" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.366508 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a103b173-b84f-4c1d-bf8f-bf278b570051-config-data-custom\") pod \"heat-cfnapi-7b5c5fc9b-vzf74\" (UID: \"a103b173-b84f-4c1d-bf8f-bf278b570051\") " pod="openstack/heat-cfnapi-7b5c5fc9b-vzf74" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.366718 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/50df18fd-8515-4e5b-a699-98930a83e9a7-internal-tls-certs\") pod \"heat-api-7d9d7f9648-qr5lg\" (UID: \"50df18fd-8515-4e5b-a699-98930a83e9a7\") " pod="openstack/heat-api-7d9d7f9648-qr5lg" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.368109 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a103b173-b84f-4c1d-bf8f-bf278b570051-combined-ca-bundle\") pod \"heat-cfnapi-7b5c5fc9b-vzf74\" (UID: \"a103b173-b84f-4c1d-bf8f-bf278b570051\") " pod="openstack/heat-cfnapi-7b5c5fc9b-vzf74" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.368629 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a103b173-b84f-4c1d-bf8f-bf278b570051-public-tls-certs\") pod \"heat-cfnapi-7b5c5fc9b-vzf74\" 
(UID: \"a103b173-b84f-4c1d-bf8f-bf278b570051\") " pod="openstack/heat-cfnapi-7b5c5fc9b-vzf74" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.368943 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50df18fd-8515-4e5b-a699-98930a83e9a7-combined-ca-bundle\") pod \"heat-api-7d9d7f9648-qr5lg\" (UID: \"50df18fd-8515-4e5b-a699-98930a83e9a7\") " pod="openstack/heat-api-7d9d7f9648-qr5lg" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.372284 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50df18fd-8515-4e5b-a699-98930a83e9a7-config-data\") pod \"heat-api-7d9d7f9648-qr5lg\" (UID: \"50df18fd-8515-4e5b-a699-98930a83e9a7\") " pod="openstack/heat-api-7d9d7f9648-qr5lg" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.386982 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/50df18fd-8515-4e5b-a699-98930a83e9a7-config-data-custom\") pod \"heat-api-7d9d7f9648-qr5lg\" (UID: \"50df18fd-8515-4e5b-a699-98930a83e9a7\") " pod="openstack/heat-api-7d9d7f9648-qr5lg" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.387506 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/50df18fd-8515-4e5b-a699-98930a83e9a7-public-tls-certs\") pod \"heat-api-7d9d7f9648-qr5lg\" (UID: \"50df18fd-8515-4e5b-a699-98930a83e9a7\") " pod="openstack/heat-api-7d9d7f9648-qr5lg" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.395071 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zb964\" (UniqueName: \"kubernetes.io/projected/a103b173-b84f-4c1d-bf8f-bf278b570051-kube-api-access-zb964\") pod \"heat-cfnapi-7b5c5fc9b-vzf74\" (UID: \"a103b173-b84f-4c1d-bf8f-bf278b570051\") " pod="openstack/heat-cfnapi-7b5c5fc9b-vzf74" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.411755 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n6nnn\" (UniqueName: \"kubernetes.io/projected/50df18fd-8515-4e5b-a699-98930a83e9a7-kube-api-access-n6nnn\") pod \"heat-api-7d9d7f9648-qr5lg\" (UID: \"50df18fd-8515-4e5b-a699-98930a83e9a7\") " pod="openstack/heat-api-7d9d7f9648-qr5lg" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.463226 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-7d9d7f9648-qr5lg" Feb 18 00:58:02 crc kubenswrapper[4791]: I0218 00:58:02.497428 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7b5c5fc9b-vzf74" Feb 18 00:58:03 crc kubenswrapper[4791]: I0218 00:58:03.270913 4791 generic.go:334] "Generic (PLEG): container finished" podID="7c4f2d33-853b-4b26-9050-a95f8a0aacff" containerID="274b8e99cc69b5607a27e5b87eed51926d9c1ac4a2fed3e8f3527894e8ab7f5f" exitCode=0 Feb 18 00:58:03 crc kubenswrapper[4791]: I0218 00:58:03.271286 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8694564946-rt9m5" event={"ID":"7c4f2d33-853b-4b26-9050-a95f8a0aacff","Type":"ContainerDied","Data":"274b8e99cc69b5607a27e5b87eed51926d9c1ac4a2fed3e8f3527894e8ab7f5f"} Feb 18 00:58:03 crc kubenswrapper[4791]: I0218 00:58:03.933772 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-3f61-account-create-update-lt497" Feb 18 00:58:03 crc kubenswrapper[4791]: I0218 00:58:03.988890 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0411e3ff-4c35-4daa-9d29-2f8a46e5ee26-operator-scripts\") pod \"0411e3ff-4c35-4daa-9d29-2f8a46e5ee26\" (UID: \"0411e3ff-4c35-4daa-9d29-2f8a46e5ee26\") " Feb 18 00:58:03 crc kubenswrapper[4791]: I0218 00:58:03.988967 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7p8s4\" (UniqueName: \"kubernetes.io/projected/0411e3ff-4c35-4daa-9d29-2f8a46e5ee26-kube-api-access-7p8s4\") pod \"0411e3ff-4c35-4daa-9d29-2f8a46e5ee26\" (UID: \"0411e3ff-4c35-4daa-9d29-2f8a46e5ee26\") " Feb 18 00:58:03 crc kubenswrapper[4791]: I0218 00:58:03.990004 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0411e3ff-4c35-4daa-9d29-2f8a46e5ee26-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0411e3ff-4c35-4daa-9d29-2f8a46e5ee26" (UID: "0411e3ff-4c35-4daa-9d29-2f8a46e5ee26"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:58:03 crc kubenswrapper[4791]: I0218 00:58:03.996721 4791 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0411e3ff-4c35-4daa-9d29-2f8a46e5ee26-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:04 crc kubenswrapper[4791]: I0218 00:58:04.020088 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0411e3ff-4c35-4daa-9d29-2f8a46e5ee26-kube-api-access-7p8s4" (OuterVolumeSpecName: "kube-api-access-7p8s4") pod "0411e3ff-4c35-4daa-9d29-2f8a46e5ee26" (UID: "0411e3ff-4c35-4daa-9d29-2f8a46e5ee26"). InnerVolumeSpecName "kube-api-access-7p8s4". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:58:04 crc kubenswrapper[4791]: I0218 00:58:04.101030 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7p8s4\" (UniqueName: \"kubernetes.io/projected/0411e3ff-4c35-4daa-9d29-2f8a46e5ee26-kube-api-access-7p8s4\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:04 crc kubenswrapper[4791]: I0218 00:58:04.290288 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-3f61-account-create-update-lt497" event={"ID":"0411e3ff-4c35-4daa-9d29-2f8a46e5ee26","Type":"ContainerDied","Data":"47b789226fcbca64343a4c20ada94a197539a477a79f2f0111911ad3aedc5bb0"} Feb 18 00:58:04 crc kubenswrapper[4791]: I0218 00:58:04.290334 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="47b789226fcbca64343a4c20ada94a197539a477a79f2f0111911ad3aedc5bb0" Feb 18 00:58:04 crc kubenswrapper[4791]: I0218 00:58:04.290404 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-3f61-account-create-update-lt497" Feb 18 00:58:04 crc kubenswrapper[4791]: I0218 00:58:04.390306 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wvm6h" Feb 18 00:58:04 crc kubenswrapper[4791]: I0218 00:58:04.390382 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wvm6h" Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.273925 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-2f60-account-create-update-kwdnz" Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.282306 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-nnnff" Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.331426 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h2drt\" (UniqueName: \"kubernetes.io/projected/a90c6dfb-27a9-46ad-9cb8-121d9fb4bee8-kube-api-access-h2drt\") pod \"a90c6dfb-27a9-46ad-9cb8-121d9fb4bee8\" (UID: \"a90c6dfb-27a9-46ad-9cb8-121d9fb4bee8\") " Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.331829 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2zlw7\" (UniqueName: \"kubernetes.io/projected/6944b18f-146d-47ba-9d71-06d200fa828e-kube-api-access-2zlw7\") pod \"6944b18f-146d-47ba-9d71-06d200fa828e\" (UID: \"6944b18f-146d-47ba-9d71-06d200fa828e\") " Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.331903 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a90c6dfb-27a9-46ad-9cb8-121d9fb4bee8-operator-scripts\") pod \"a90c6dfb-27a9-46ad-9cb8-121d9fb4bee8\" (UID: \"a90c6dfb-27a9-46ad-9cb8-121d9fb4bee8\") " Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.331950 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6944b18f-146d-47ba-9d71-06d200fa828e-operator-scripts\") pod \"6944b18f-146d-47ba-9d71-06d200fa828e\" (UID: \"6944b18f-146d-47ba-9d71-06d200fa828e\") " Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.343507 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a90c6dfb-27a9-46ad-9cb8-121d9fb4bee8-kube-api-access-h2drt" (OuterVolumeSpecName: "kube-api-access-h2drt") pod "a90c6dfb-27a9-46ad-9cb8-121d9fb4bee8" (UID: "a90c6dfb-27a9-46ad-9cb8-121d9fb4bee8"). InnerVolumeSpecName "kube-api-access-h2drt". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.346215 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6944b18f-146d-47ba-9d71-06d200fa828e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6944b18f-146d-47ba-9d71-06d200fa828e" (UID: "6944b18f-146d-47ba-9d71-06d200fa828e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.348251 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a90c6dfb-27a9-46ad-9cb8-121d9fb4bee8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a90c6dfb-27a9-46ad-9cb8-121d9fb4bee8" (UID: "a90c6dfb-27a9-46ad-9cb8-121d9fb4bee8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.349179 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6944b18f-146d-47ba-9d71-06d200fa828e-kube-api-access-2zlw7" (OuterVolumeSpecName: "kube-api-access-2zlw7") pod "6944b18f-146d-47ba-9d71-06d200fa828e" (UID: "6944b18f-146d-47ba-9d71-06d200fa828e"). InnerVolumeSpecName "kube-api-access-2zlw7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.364607 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-snbt9" Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.365375 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-bbc3-account-create-update-cgjwl" Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.365042 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-fw4wg" Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.369057 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-snbt9" event={"ID":"2e5c3f09-b080-4738-a696-f210249c18eb","Type":"ContainerDied","Data":"39a1758b4448f5a430fd1803109af2324219ac599e4f4fe9d8e276a8cc54a478"} Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.369084 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="39a1758b4448f5a430fd1803109af2324219ac599e4f4fe9d8e276a8cc54a478" Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.371950 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-2f60-account-create-update-kwdnz" event={"ID":"6944b18f-146d-47ba-9d71-06d200fa828e","Type":"ContainerDied","Data":"20567d488aae96bb3577b17aa2b97a71fe86ff9899b9a2aceaa5b5c31d1265d2"} Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.371974 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="20567d488aae96bb3577b17aa2b97a71fe86ff9899b9a2aceaa5b5c31d1265d2" Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.372019 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-2f60-account-create-update-kwdnz" Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.393250 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-fw4wg" event={"ID":"82100e2b-cadd-4a88-9599-2f0932deacce","Type":"ContainerDied","Data":"c6c447bfad1eaec62bb97135cc8703e2cf4f73202f47f0106325d847c46f36ac"} Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.393292 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c6c447bfad1eaec62bb97135cc8703e2cf4f73202f47f0106325d847c46f36ac" Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.393303 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-fw4wg" Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.420371 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-bbc3-account-create-update-cgjwl" Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.420904 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-bbc3-account-create-update-cgjwl" event={"ID":"2cbbe657-958b-4c43-a636-e04ac880613d","Type":"ContainerDied","Data":"f6a83ab234ae1c066787eb8639439afabb0a47209093414c8d45c163a570ccdc"} Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.420928 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f6a83ab234ae1c066787eb8639439afabb0a47209093414c8d45c163a570ccdc" Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.447379 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pv9w8\" (UniqueName: \"kubernetes.io/projected/82100e2b-cadd-4a88-9599-2f0932deacce-kube-api-access-pv9w8\") pod \"82100e2b-cadd-4a88-9599-2f0932deacce\" (UID: \"82100e2b-cadd-4a88-9599-2f0932deacce\") " Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.447464 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2cbbe657-958b-4c43-a636-e04ac880613d-operator-scripts\") pod \"2cbbe657-958b-4c43-a636-e04ac880613d\" (UID: \"2cbbe657-958b-4c43-a636-e04ac880613d\") " Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.447518 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/82100e2b-cadd-4a88-9599-2f0932deacce-operator-scripts\") pod \"82100e2b-cadd-4a88-9599-2f0932deacce\" (UID: \"82100e2b-cadd-4a88-9599-2f0932deacce\") " Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.447595 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e5c3f09-b080-4738-a696-f210249c18eb-operator-scripts\") pod \"2e5c3f09-b080-4738-a696-f210249c18eb\" (UID: \"2e5c3f09-b080-4738-a696-f210249c18eb\") " Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.447640 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f4lzv\" (UniqueName: \"kubernetes.io/projected/2cbbe657-958b-4c43-a636-e04ac880613d-kube-api-access-f4lzv\") pod \"2cbbe657-958b-4c43-a636-e04ac880613d\" (UID: \"2cbbe657-958b-4c43-a636-e04ac880613d\") " Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.447703 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2lnmg\" (UniqueName: \"kubernetes.io/projected/2e5c3f09-b080-4738-a696-f210249c18eb-kube-api-access-2lnmg\") pod \"2e5c3f09-b080-4738-a696-f210249c18eb\" (UID: \"2e5c3f09-b080-4738-a696-f210249c18eb\") " Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.447830 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-nnnff" Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.448619 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82100e2b-cadd-4a88-9599-2f0932deacce-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "82100e2b-cadd-4a88-9599-2f0932deacce" (UID: "82100e2b-cadd-4a88-9599-2f0932deacce"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.449273 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e5c3f09-b080-4738-a696-f210249c18eb-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2e5c3f09-b080-4738-a696-f210249c18eb" (UID: "2e5c3f09-b080-4738-a696-f210249c18eb"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.450658 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2zlw7\" (UniqueName: \"kubernetes.io/projected/6944b18f-146d-47ba-9d71-06d200fa828e-kube-api-access-2zlw7\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.450673 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2cbbe657-958b-4c43-a636-e04ac880613d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2cbbe657-958b-4c43-a636-e04ac880613d" (UID: "2cbbe657-958b-4c43-a636-e04ac880613d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.454981 4791 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a90c6dfb-27a9-46ad-9cb8-121d9fb4bee8-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.455342 4791 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6944b18f-146d-47ba-9d71-06d200fa828e-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.455473 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h2drt\" (UniqueName: \"kubernetes.io/projected/a90c6dfb-27a9-46ad-9cb8-121d9fb4bee8-kube-api-access-h2drt\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.456476 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e5c3f09-b080-4738-a696-f210249c18eb-kube-api-access-2lnmg" (OuterVolumeSpecName: "kube-api-access-2lnmg") pod "2e5c3f09-b080-4738-a696-f210249c18eb" (UID: "2e5c3f09-b080-4738-a696-f210249c18eb"). InnerVolumeSpecName "kube-api-access-2lnmg". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.460434 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82100e2b-cadd-4a88-9599-2f0932deacce-kube-api-access-pv9w8" (OuterVolumeSpecName: "kube-api-access-pv9w8") pod "82100e2b-cadd-4a88-9599-2f0932deacce" (UID: "82100e2b-cadd-4a88-9599-2f0932deacce"). InnerVolumeSpecName "kube-api-access-pv9w8". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.460924 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2cbbe657-958b-4c43-a636-e04ac880613d-kube-api-access-f4lzv" (OuterVolumeSpecName: "kube-api-access-f4lzv") pod "2cbbe657-958b-4c43-a636-e04ac880613d" (UID: "2cbbe657-958b-4c43-a636-e04ac880613d"). InnerVolumeSpecName "kube-api-access-f4lzv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.467361 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wvm6h" podUID="90ea96e9-ad1e-49ab-b9f9-b60d2142aacd" containerName="registry-server" probeResult="failure" output=< Feb 18 00:58:05 crc kubenswrapper[4791]: timeout: failed to connect service ":50051" within 1s Feb 18 00:58:05 crc kubenswrapper[4791]: > Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.475864 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-nnnff" event={"ID":"a90c6dfb-27a9-46ad-9cb8-121d9fb4bee8","Type":"ContainerDied","Data":"a2f468cb24d3721bd345192a8b66133d1a85d859d91f4f847bfd8b158da1634f"} Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.475923 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a2f468cb24d3721bd345192a8b66133d1a85d859d91f4f847bfd8b158da1634f" Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.562113 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f4lzv\" (UniqueName: \"kubernetes.io/projected/2cbbe657-958b-4c43-a636-e04ac880613d-kube-api-access-f4lzv\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.562341 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2lnmg\" (UniqueName: \"kubernetes.io/projected/2e5c3f09-b080-4738-a696-f210249c18eb-kube-api-access-2lnmg\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.562894 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pv9w8\" (UniqueName: \"kubernetes.io/projected/82100e2b-cadd-4a88-9599-2f0932deacce-kube-api-access-pv9w8\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.562967 4791 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2cbbe657-958b-4c43-a636-e04ac880613d-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.563041 4791 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/82100e2b-cadd-4a88-9599-2f0932deacce-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.563105 4791 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e5c3f09-b080-4738-a696-f210249c18eb-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.890307 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-7d9d7f9648-qr5lg"] Feb 18 00:58:05 crc kubenswrapper[4791]: I0218 00:58:05.901288 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-7b5c5fc9b-vzf74"] Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.097312 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-f6bc4c6c9-9skqg" Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.262261 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-82pnf"] Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.262515 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5784cf869f-82pnf" 
podUID="825d75de-0281-4172-8f86-e2c23e4a818a" containerName="dnsmasq-dns" containerID="cri-o://7fbd46809c4d53a9a7508b40e630426d31eaa2554b9639bf1fc6eb11e186767a" gracePeriod=10 Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.426977 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8694564946-rt9m5" Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.530888 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7c4f2d33-853b-4b26-9050-a95f8a0aacff-ovndb-tls-certs\") pod \"7c4f2d33-853b-4b26-9050-a95f8a0aacff\" (UID: \"7c4f2d33-853b-4b26-9050-a95f8a0aacff\") " Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.531345 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c4f2d33-853b-4b26-9050-a95f8a0aacff-combined-ca-bundle\") pod \"7c4f2d33-853b-4b26-9050-a95f8a0aacff\" (UID: \"7c4f2d33-853b-4b26-9050-a95f8a0aacff\") " Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.531395 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7c4f2d33-853b-4b26-9050-a95f8a0aacff-httpd-config\") pod \"7c4f2d33-853b-4b26-9050-a95f8a0aacff\" (UID: \"7c4f2d33-853b-4b26-9050-a95f8a0aacff\") " Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.531471 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cvjwb\" (UniqueName: \"kubernetes.io/projected/7c4f2d33-853b-4b26-9050-a95f8a0aacff-kube-api-access-cvjwb\") pod \"7c4f2d33-853b-4b26-9050-a95f8a0aacff\" (UID: \"7c4f2d33-853b-4b26-9050-a95f8a0aacff\") " Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.531545 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7c4f2d33-853b-4b26-9050-a95f8a0aacff-config\") pod \"7c4f2d33-853b-4b26-9050-a95f8a0aacff\" (UID: \"7c4f2d33-853b-4b26-9050-a95f8a0aacff\") " Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.554417 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c4f2d33-853b-4b26-9050-a95f8a0aacff-kube-api-access-cvjwb" (OuterVolumeSpecName: "kube-api-access-cvjwb") pod "7c4f2d33-853b-4b26-9050-a95f8a0aacff" (UID: "7c4f2d33-853b-4b26-9050-a95f8a0aacff"). InnerVolumeSpecName "kube-api-access-cvjwb". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.554754 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c4f2d33-853b-4b26-9050-a95f8a0aacff-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "7c4f2d33-853b-4b26-9050-a95f8a0aacff" (UID: "7c4f2d33-853b-4b26-9050-a95f8a0aacff"). InnerVolumeSpecName "httpd-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.572398 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-5c6c54c7f5-xvkc2" Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.573864 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-f5d4ccdc7-7kwld" event={"ID":"e21c26db-c068-4ee6-be23-2539a847b9d8","Type":"ContainerStarted","Data":"440ee2c00e7d300bf8a8b18e44ec7c9726274b5db6e2405a55b4c05dd7fbc1d6"} Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.573904 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-cfnapi-f5d4ccdc7-7kwld" podUID="e21c26db-c068-4ee6-be23-2539a847b9d8" containerName="heat-cfnapi" containerID="cri-o://440ee2c00e7d300bf8a8b18e44ec7c9726274b5db6e2405a55b4c05dd7fbc1d6" gracePeriod=60 Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.573947 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-5c6c54c7f5-xvkc2" Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.573961 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-f5d4ccdc7-7kwld" Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.603532 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8694564946-rt9m5" event={"ID":"7c4f2d33-853b-4b26-9050-a95f8a0aacff","Type":"ContainerDied","Data":"a604e5c5f0f3ffae4286698e7913c6b9f92fd1bd495cbff2723c520b5823d683"} Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.603584 4791 scope.go:117] "RemoveContainer" containerID="8abe60f3df0b33e090dc83d4165c66f72192c57426bc2db0ec4358908d91390e" Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.603725 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-8694564946-rt9m5" Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.634614 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6fd9dd05-384e-406a-b195-af283d2807ad","Type":"ContainerStarted","Data":"d4f50ae34439c375f60cdffc235c43e058a66e829badc907c639166e3aeb44ee"} Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.639134 4791 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7c4f2d33-853b-4b26-9050-a95f8a0aacff-httpd-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.651243 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cvjwb\" (UniqueName: \"kubernetes.io/projected/7c4f2d33-853b-4b26-9050-a95f8a0aacff-kube-api-access-cvjwb\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.640051 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-f5d4ccdc7-7kwld" podStartSLOduration=10.183491926 podStartE2EDuration="16.640033809s" podCreationTimestamp="2026-02-18 00:57:50 +0000 UTC" firstStartedPulling="2026-02-18 00:57:58.625139773 +0000 UTC m=+1420.193152933" lastFinishedPulling="2026-02-18 00:58:05.081681646 +0000 UTC m=+1426.649694816" observedRunningTime="2026-02-18 00:58:06.623052413 +0000 UTC m=+1428.191065583" watchObservedRunningTime="2026-02-18 00:58:06.640033809 +0000 UTC m=+1428.208046979" Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.655704 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-api-74768d85bc-v29rm" podUID="3546ea0e-00fb-4c3a-a653-54fc9527457b" containerName="heat-api" containerID="cri-o://77fb6dfb5b6586dbc3650937321e102d9772e2b1393976dd61111617153d1186" gracePeriod=60 Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.656061 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-74768d85bc-v29rm" event={"ID":"3546ea0e-00fb-4c3a-a653-54fc9527457b","Type":"ContainerStarted","Data":"77fb6dfb5b6586dbc3650937321e102d9772e2b1393976dd61111617153d1186"} Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.656101 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-74768d85bc-v29rm" Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.672399 4791 scope.go:117] "RemoveContainer" containerID="274b8e99cc69b5607a27e5b87eed51926d9c1ac4a2fed3e8f3527894e8ab7f5f" Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.673460 4791 generic.go:334] "Generic (PLEG): container finished" podID="825d75de-0281-4172-8f86-e2c23e4a818a" containerID="7fbd46809c4d53a9a7508b40e630426d31eaa2554b9639bf1fc6eb11e186767a" exitCode=0 Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.673574 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-82pnf" event={"ID":"825d75de-0281-4172-8f86-e2c23e4a818a","Type":"ContainerDied","Data":"7fbd46809c4d53a9a7508b40e630426d31eaa2554b9639bf1fc6eb11e186767a"} Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.684021 4791 generic.go:334] "Generic (PLEG): container finished" podID="cea6666e-f991-44f6-bb6f-55cead75043f" containerID="24db7d9614a1f78803772b290b51aca9c37dfdcdbf5f1d6740987c13a6b38ceb" exitCode=1 Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.684090 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-657c5fbcdb-dphwk" 
event={"ID":"cea6666e-f991-44f6-bb6f-55cead75043f","Type":"ContainerDied","Data":"24db7d9614a1f78803772b290b51aca9c37dfdcdbf5f1d6740987c13a6b38ceb"} Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.685052 4791 scope.go:117] "RemoveContainer" containerID="24db7d9614a1f78803772b290b51aca9c37dfdcdbf5f1d6740987c13a6b38ceb" Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.708063 4791 generic.go:334] "Generic (PLEG): container finished" podID="81e44d40-acc5-4bae-a4dc-b9dd837fffef" containerID="8cdb7d76bdc4f9e65b8d175da0e0086300db841290966073dd830405a7d3c74c" exitCode=1 Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.708129 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7f4b9bdf54-r459n" event={"ID":"81e44d40-acc5-4bae-a4dc-b9dd837fffef","Type":"ContainerDied","Data":"8cdb7d76bdc4f9e65b8d175da0e0086300db841290966073dd830405a7d3c74c"} Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.709023 4791 scope.go:117] "RemoveContainer" containerID="8cdb7d76bdc4f9e65b8d175da0e0086300db841290966073dd830405a7d3c74c" Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.718380 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-7d9d7f9648-qr5lg" event={"ID":"50df18fd-8515-4e5b-a699-98930a83e9a7","Type":"ContainerStarted","Data":"3f2f5eb597b2df548269da619941926f10ccbda5ec9e38c1e2ab01f647568191"} Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.740874 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-snbt9" Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.741785 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7b5c5fc9b-vzf74" event={"ID":"a103b173-b84f-4c1d-bf8f-bf278b570051","Type":"ContainerStarted","Data":"5d76b15549a2208efafe8e8cda3962f199126bfc181dcf08406b55fdbe55ef74"} Feb 18 00:58:06 crc kubenswrapper[4791]: I0218 00:58:06.844211 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-74768d85bc-v29rm" podStartSLOduration=10.471856481 podStartE2EDuration="16.844187436s" podCreationTimestamp="2026-02-18 00:57:50 +0000 UTC" firstStartedPulling="2026-02-18 00:57:58.67897027 +0000 UTC m=+1420.246983440" lastFinishedPulling="2026-02-18 00:58:05.051301235 +0000 UTC m=+1426.619314395" observedRunningTime="2026-02-18 00:58:06.68135517 +0000 UTC m=+1428.249368340" watchObservedRunningTime="2026-02-18 00:58:06.844187436 +0000 UTC m=+1428.412200606" Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.012034 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c4f2d33-853b-4b26-9050-a95f8a0aacff-config" (OuterVolumeSpecName: "config") pod "7c4f2d33-853b-4b26-9050-a95f8a0aacff" (UID: "7c4f2d33-853b-4b26-9050-a95f8a0aacff"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.073549 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c4f2d33-853b-4b26-9050-a95f8a0aacff-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7c4f2d33-853b-4b26-9050-a95f8a0aacff" (UID: "7c4f2d33-853b-4b26-9050-a95f8a0aacff"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.119896 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/7c4f2d33-853b-4b26-9050-a95f8a0aacff-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.119923 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c4f2d33-853b-4b26-9050-a95f8a0aacff-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.124445 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c4f2d33-853b-4b26-9050-a95f8a0aacff-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "7c4f2d33-853b-4b26-9050-a95f8a0aacff" (UID: "7c4f2d33-853b-4b26-9050-a95f8a0aacff"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.222612 4791 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7c4f2d33-853b-4b26-9050-a95f8a0aacff-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.424602 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5784cf869f-82pnf" Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.430142 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-8694564946-rt9m5"] Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.440928 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-8694564946-rt9m5"] Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.539302 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/825d75de-0281-4172-8f86-e2c23e4a818a-config\") pod \"825d75de-0281-4172-8f86-e2c23e4a818a\" (UID: \"825d75de-0281-4172-8f86-e2c23e4a818a\") " Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.539733 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b46wt\" (UniqueName: \"kubernetes.io/projected/825d75de-0281-4172-8f86-e2c23e4a818a-kube-api-access-b46wt\") pod \"825d75de-0281-4172-8f86-e2c23e4a818a\" (UID: \"825d75de-0281-4172-8f86-e2c23e4a818a\") " Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.539757 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/825d75de-0281-4172-8f86-e2c23e4a818a-ovsdbserver-nb\") pod \"825d75de-0281-4172-8f86-e2c23e4a818a\" (UID: \"825d75de-0281-4172-8f86-e2c23e4a818a\") " Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.539837 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/825d75de-0281-4172-8f86-e2c23e4a818a-dns-svc\") pod \"825d75de-0281-4172-8f86-e2c23e4a818a\" (UID: \"825d75de-0281-4172-8f86-e2c23e4a818a\") " Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.539891 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/825d75de-0281-4172-8f86-e2c23e4a818a-dns-swift-storage-0\") pod \"825d75de-0281-4172-8f86-e2c23e4a818a\" (UID: \"825d75de-0281-4172-8f86-e2c23e4a818a\") " 
Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.540060 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/825d75de-0281-4172-8f86-e2c23e4a818a-ovsdbserver-sb\") pod \"825d75de-0281-4172-8f86-e2c23e4a818a\" (UID: \"825d75de-0281-4172-8f86-e2c23e4a818a\") " Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.554441 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/825d75de-0281-4172-8f86-e2c23e4a818a-kube-api-access-b46wt" (OuterVolumeSpecName: "kube-api-access-b46wt") pod "825d75de-0281-4172-8f86-e2c23e4a818a" (UID: "825d75de-0281-4172-8f86-e2c23e4a818a"). InnerVolumeSpecName "kube-api-access-b46wt". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.642965 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b46wt\" (UniqueName: \"kubernetes.io/projected/825d75de-0281-4172-8f86-e2c23e4a818a-kube-api-access-b46wt\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.775296 4791 generic.go:334] "Generic (PLEG): container finished" podID="e21c26db-c068-4ee6-be23-2539a847b9d8" containerID="440ee2c00e7d300bf8a8b18e44ec7c9726274b5db6e2405a55b4c05dd7fbc1d6" exitCode=0 Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.775358 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-f5d4ccdc7-7kwld" event={"ID":"e21c26db-c068-4ee6-be23-2539a847b9d8","Type":"ContainerDied","Data":"440ee2c00e7d300bf8a8b18e44ec7c9726274b5db6e2405a55b4c05dd7fbc1d6"} Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.775385 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-f5d4ccdc7-7kwld" event={"ID":"e21c26db-c068-4ee6-be23-2539a847b9d8","Type":"ContainerDied","Data":"5e8eb2cb38e15551526acc464b9b38071df702bdc72abd7e75fa4befd6bda38a"} Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.775395 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5e8eb2cb38e15551526acc464b9b38071df702bdc72abd7e75fa4befd6bda38a" Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.778392 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-82pnf" event={"ID":"825d75de-0281-4172-8f86-e2c23e4a818a","Type":"ContainerDied","Data":"542192bdb996760c36ab61f7f1c5d18795ceb4c2a470340789d62d29897918d5"} Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.778424 4791 scope.go:117] "RemoveContainer" containerID="7fbd46809c4d53a9a7508b40e630426d31eaa2554b9639bf1fc6eb11e186767a" Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.778515 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5784cf869f-82pnf" Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.788604 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-7d9d7f9648-qr5lg" event={"ID":"50df18fd-8515-4e5b-a699-98930a83e9a7","Type":"ContainerStarted","Data":"f1253610bb8dc61aaacd60f23d4a6e435d298344a643767167f73af26b6a5405"} Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.788685 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-7d9d7f9648-qr5lg" Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.799539 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7b5c5fc9b-vzf74" event={"ID":"a103b173-b84f-4c1d-bf8f-bf278b570051","Type":"ContainerStarted","Data":"924fb50aa56a57a481c16b72e5e5033cc10b2b5247f29cb7f34e7013f68bcd62"} Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.800542 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-7b5c5fc9b-vzf74" Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.803947 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/825d75de-0281-4172-8f86-e2c23e4a818a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "825d75de-0281-4172-8f86-e2c23e4a818a" (UID: "825d75de-0281-4172-8f86-e2c23e4a818a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.822362 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/825d75de-0281-4172-8f86-e2c23e4a818a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "825d75de-0281-4172-8f86-e2c23e4a818a" (UID: "825d75de-0281-4172-8f86-e2c23e4a818a"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.822529 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6fd9dd05-384e-406a-b195-af283d2807ad","Type":"ContainerStarted","Data":"77d43c7a2a13ea39ea539167017369c55e1fdc1f07b5bdeaa157f1cca0bd85b1"} Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.823178 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-7d9d7f9648-qr5lg" podStartSLOduration=5.8231512930000005 podStartE2EDuration="5.823151293s" podCreationTimestamp="2026-02-18 00:58:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:58:07.805289789 +0000 UTC m=+1429.373302959" watchObservedRunningTime="2026-02-18 00:58:07.823151293 +0000 UTC m=+1429.391164453" Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.835078 4791 generic.go:334] "Generic (PLEG): container finished" podID="3546ea0e-00fb-4c3a-a653-54fc9527457b" containerID="77fb6dfb5b6586dbc3650937321e102d9772e2b1393976dd61111617153d1186" exitCode=0 Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.835532 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-74768d85bc-v29rm" event={"ID":"3546ea0e-00fb-4c3a-a653-54fc9527457b","Type":"ContainerDied","Data":"77fb6dfb5b6586dbc3650937321e102d9772e2b1393976dd61111617153d1186"} Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.835558 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-74768d85bc-v29rm" event={"ID":"3546ea0e-00fb-4c3a-a653-54fc9527457b","Type":"ContainerDied","Data":"b3463f8bf232782e1862c959d4bfbfc0cdc894959eb60545e231b1ad19a4bdec"} Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.835567 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b3463f8bf232782e1862c959d4bfbfc0cdc894959eb60545e231b1ad19a4bdec" Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.835626 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/825d75de-0281-4172-8f86-e2c23e4a818a-config" (OuterVolumeSpecName: "config") pod "825d75de-0281-4172-8f86-e2c23e4a818a" (UID: "825d75de-0281-4172-8f86-e2c23e4a818a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.843236 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/825d75de-0281-4172-8f86-e2c23e4a818a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "825d75de-0281-4172-8f86-e2c23e4a818a" (UID: "825d75de-0281-4172-8f86-e2c23e4a818a"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.844099 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-7b5c5fc9b-vzf74" podStartSLOduration=5.844086322 podStartE2EDuration="5.844086322s" podCreationTimestamp="2026-02-18 00:58:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:58:07.827308732 +0000 UTC m=+1429.395321902" watchObservedRunningTime="2026-02-18 00:58:07.844086322 +0000 UTC m=+1429.412099492" Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.860456 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-74768d85bc-v29rm" Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.861699 4791 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/825d75de-0281-4172-8f86-e2c23e4a818a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.861745 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/825d75de-0281-4172-8f86-e2c23e4a818a-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.861756 4791 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/825d75de-0281-4172-8f86-e2c23e4a818a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.861770 4791 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/825d75de-0281-4172-8f86-e2c23e4a818a-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.873839 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/825d75de-0281-4172-8f86-e2c23e4a818a-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "825d75de-0281-4172-8f86-e2c23e4a818a" (UID: "825d75de-0281-4172-8f86-e2c23e4a818a"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.878008 4791 scope.go:117] "RemoveContainer" containerID="a16214bfe2450f85d637539c5f2856edc73efb431a901036708d6adb857a45f6" Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.883383 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-f5d4ccdc7-7kwld" Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.962627 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnl6l\" (UniqueName: \"kubernetes.io/projected/3546ea0e-00fb-4c3a-a653-54fc9527457b-kube-api-access-rnl6l\") pod \"3546ea0e-00fb-4c3a-a653-54fc9527457b\" (UID: \"3546ea0e-00fb-4c3a-a653-54fc9527457b\") " Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.962756 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e21c26db-c068-4ee6-be23-2539a847b9d8-config-data\") pod \"e21c26db-c068-4ee6-be23-2539a847b9d8\" (UID: \"e21c26db-c068-4ee6-be23-2539a847b9d8\") " Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.962864 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3546ea0e-00fb-4c3a-a653-54fc9527457b-config-data\") pod \"3546ea0e-00fb-4c3a-a653-54fc9527457b\" (UID: \"3546ea0e-00fb-4c3a-a653-54fc9527457b\") " Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.962906 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3546ea0e-00fb-4c3a-a653-54fc9527457b-combined-ca-bundle\") pod \"3546ea0e-00fb-4c3a-a653-54fc9527457b\" (UID: \"3546ea0e-00fb-4c3a-a653-54fc9527457b\") " Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.963037 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nrs2f\" (UniqueName: \"kubernetes.io/projected/e21c26db-c068-4ee6-be23-2539a847b9d8-kube-api-access-nrs2f\") pod \"e21c26db-c068-4ee6-be23-2539a847b9d8\" (UID: \"e21c26db-c068-4ee6-be23-2539a847b9d8\") " Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.963069 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e21c26db-c068-4ee6-be23-2539a847b9d8-combined-ca-bundle\") pod \"e21c26db-c068-4ee6-be23-2539a847b9d8\" (UID: \"e21c26db-c068-4ee6-be23-2539a847b9d8\") " Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.963131 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e21c26db-c068-4ee6-be23-2539a847b9d8-config-data-custom\") pod \"e21c26db-c068-4ee6-be23-2539a847b9d8\" (UID: \"e21c26db-c068-4ee6-be23-2539a847b9d8\") " Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.963224 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3546ea0e-00fb-4c3a-a653-54fc9527457b-config-data-custom\") pod \"3546ea0e-00fb-4c3a-a653-54fc9527457b\" (UID: \"3546ea0e-00fb-4c3a-a653-54fc9527457b\") " Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.963757 4791 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/825d75de-0281-4172-8f86-e2c23e4a818a-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.975421 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3546ea0e-00fb-4c3a-a653-54fc9527457b-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "3546ea0e-00fb-4c3a-a653-54fc9527457b" (UID: 
"3546ea0e-00fb-4c3a-a653-54fc9527457b"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.992441 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3546ea0e-00fb-4c3a-a653-54fc9527457b-kube-api-access-rnl6l" (OuterVolumeSpecName: "kube-api-access-rnl6l") pod "3546ea0e-00fb-4c3a-a653-54fc9527457b" (UID: "3546ea0e-00fb-4c3a-a653-54fc9527457b"). InnerVolumeSpecName "kube-api-access-rnl6l". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:58:07 crc kubenswrapper[4791]: I0218 00:58:07.992555 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e21c26db-c068-4ee6-be23-2539a847b9d8-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "e21c26db-c068-4ee6-be23-2539a847b9d8" (UID: "e21c26db-c068-4ee6-be23-2539a847b9d8"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:08 crc kubenswrapper[4791]: I0218 00:58:08.006395 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e21c26db-c068-4ee6-be23-2539a847b9d8-kube-api-access-nrs2f" (OuterVolumeSpecName: "kube-api-access-nrs2f") pod "e21c26db-c068-4ee6-be23-2539a847b9d8" (UID: "e21c26db-c068-4ee6-be23-2539a847b9d8"). InnerVolumeSpecName "kube-api-access-nrs2f". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:58:08 crc kubenswrapper[4791]: I0218 00:58:08.065858 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nrs2f\" (UniqueName: \"kubernetes.io/projected/e21c26db-c068-4ee6-be23-2539a847b9d8-kube-api-access-nrs2f\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:08 crc kubenswrapper[4791]: I0218 00:58:08.066026 4791 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e21c26db-c068-4ee6-be23-2539a847b9d8-config-data-custom\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:08 crc kubenswrapper[4791]: I0218 00:58:08.066042 4791 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3546ea0e-00fb-4c3a-a653-54fc9527457b-config-data-custom\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:08 crc kubenswrapper[4791]: I0218 00:58:08.066051 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnl6l\" (UniqueName: \"kubernetes.io/projected/3546ea0e-00fb-4c3a-a653-54fc9527457b-kube-api-access-rnl6l\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:08 crc kubenswrapper[4791]: I0218 00:58:08.077791 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e21c26db-c068-4ee6-be23-2539a847b9d8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e21c26db-c068-4ee6-be23-2539a847b9d8" (UID: "e21c26db-c068-4ee6-be23-2539a847b9d8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:08 crc kubenswrapper[4791]: I0218 00:58:08.081526 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3546ea0e-00fb-4c3a-a653-54fc9527457b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3546ea0e-00fb-4c3a-a653-54fc9527457b" (UID: "3546ea0e-00fb-4c3a-a653-54fc9527457b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:08 crc kubenswrapper[4791]: I0218 00:58:08.131763 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e21c26db-c068-4ee6-be23-2539a847b9d8-config-data" (OuterVolumeSpecName: "config-data") pod "e21c26db-c068-4ee6-be23-2539a847b9d8" (UID: "e21c26db-c068-4ee6-be23-2539a847b9d8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:08 crc kubenswrapper[4791]: I0218 00:58:08.155404 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3546ea0e-00fb-4c3a-a653-54fc9527457b-config-data" (OuterVolumeSpecName: "config-data") pod "3546ea0e-00fb-4c3a-a653-54fc9527457b" (UID: "3546ea0e-00fb-4c3a-a653-54fc9527457b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:08 crc kubenswrapper[4791]: I0218 00:58:08.168299 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3546ea0e-00fb-4c3a-a653-54fc9527457b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:08 crc kubenswrapper[4791]: I0218 00:58:08.168339 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e21c26db-c068-4ee6-be23-2539a847b9d8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:08 crc kubenswrapper[4791]: I0218 00:58:08.168349 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e21c26db-c068-4ee6-be23-2539a847b9d8-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:08 crc kubenswrapper[4791]: I0218 00:58:08.168358 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3546ea0e-00fb-4c3a-a653-54fc9527457b-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:08 crc kubenswrapper[4791]: I0218 00:58:08.240154 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-82pnf"] Feb 18 00:58:08 crc kubenswrapper[4791]: I0218 00:58:08.254266 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-82pnf"] Feb 18 00:58:08 crc kubenswrapper[4791]: I0218 00:58:08.847590 4791 generic.go:334] "Generic (PLEG): container finished" podID="cea6666e-f991-44f6-bb6f-55cead75043f" containerID="9818653ce514504e9b398b2998a0471c5002812e7e0f6d7c5ea536ea7eef7e33" exitCode=1 Feb 18 00:58:08 crc kubenswrapper[4791]: I0218 00:58:08.847654 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-657c5fbcdb-dphwk" event={"ID":"cea6666e-f991-44f6-bb6f-55cead75043f","Type":"ContainerDied","Data":"9818653ce514504e9b398b2998a0471c5002812e7e0f6d7c5ea536ea7eef7e33"} Feb 18 00:58:08 crc kubenswrapper[4791]: I0218 00:58:08.847950 4791 scope.go:117] "RemoveContainer" containerID="24db7d9614a1f78803772b290b51aca9c37dfdcdbf5f1d6740987c13a6b38ceb" Feb 18 00:58:08 crc kubenswrapper[4791]: I0218 00:58:08.848393 4791 scope.go:117] "RemoveContainer" containerID="9818653ce514504e9b398b2998a0471c5002812e7e0f6d7c5ea536ea7eef7e33" Feb 18 00:58:08 crc kubenswrapper[4791]: E0218 00:58:08.848699 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-657c5fbcdb-dphwk_openstack(cea6666e-f991-44f6-bb6f-55cead75043f)\"" 
pod="openstack/heat-api-657c5fbcdb-dphwk" podUID="cea6666e-f991-44f6-bb6f-55cead75043f" Feb 18 00:58:08 crc kubenswrapper[4791]: I0218 00:58:08.850194 4791 generic.go:334] "Generic (PLEG): container finished" podID="81e44d40-acc5-4bae-a4dc-b9dd837fffef" containerID="c6d5189293d82e7a532d052b5bc004f1e15f18dcdd6071502970727ea050d8bd" exitCode=1 Feb 18 00:58:08 crc kubenswrapper[4791]: I0218 00:58:08.850263 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7f4b9bdf54-r459n" event={"ID":"81e44d40-acc5-4bae-a4dc-b9dd837fffef","Type":"ContainerDied","Data":"c6d5189293d82e7a532d052b5bc004f1e15f18dcdd6071502970727ea050d8bd"} Feb 18 00:58:08 crc kubenswrapper[4791]: I0218 00:58:08.850933 4791 scope.go:117] "RemoveContainer" containerID="c6d5189293d82e7a532d052b5bc004f1e15f18dcdd6071502970727ea050d8bd" Feb 18 00:58:08 crc kubenswrapper[4791]: E0218 00:58:08.851201 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-7f4b9bdf54-r459n_openstack(81e44d40-acc5-4bae-a4dc-b9dd837fffef)\"" pod="openstack/heat-cfnapi-7f4b9bdf54-r459n" podUID="81e44d40-acc5-4bae-a4dc-b9dd837fffef" Feb 18 00:58:08 crc kubenswrapper[4791]: I0218 00:58:08.854045 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-74768d85bc-v29rm" Feb 18 00:58:08 crc kubenswrapper[4791]: I0218 00:58:08.857370 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6fd9dd05-384e-406a-b195-af283d2807ad","Type":"ContainerStarted","Data":"65d1eba548bbfd6e1b79675061cb46b7a576d3c275f2e3feedff804c24c8ec9f"} Feb 18 00:58:08 crc kubenswrapper[4791]: I0218 00:58:08.857830 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-f5d4ccdc7-7kwld" Feb 18 00:58:08 crc kubenswrapper[4791]: I0218 00:58:08.889313 4791 scope.go:117] "RemoveContainer" containerID="8cdb7d76bdc4f9e65b8d175da0e0086300db841290966073dd830405a7d3c74c" Feb 18 00:58:08 crc kubenswrapper[4791]: I0218 00:58:08.932930 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.37206728 podStartE2EDuration="9.932906892s" podCreationTimestamp="2026-02-18 00:57:59 +0000 UTC" firstStartedPulling="2026-02-18 00:58:00.887694268 +0000 UTC m=+1422.455707438" lastFinishedPulling="2026-02-18 00:58:08.44853388 +0000 UTC m=+1430.016547050" observedRunningTime="2026-02-18 00:58:08.896617829 +0000 UTC m=+1430.464630999" watchObservedRunningTime="2026-02-18 00:58:08.932906892 +0000 UTC m=+1430.500920052" Feb 18 00:58:08 crc kubenswrapper[4791]: I0218 00:58:08.983989 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-f5d4ccdc7-7kwld"] Feb 18 00:58:08 crc kubenswrapper[4791]: I0218 00:58:08.997239 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-f5d4ccdc7-7kwld"] Feb 18 00:58:09 crc kubenswrapper[4791]: I0218 00:58:09.037530 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-74768d85bc-v29rm"] Feb 18 00:58:09 crc kubenswrapper[4791]: I0218 00:58:09.048098 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-74768d85bc-v29rm"] Feb 18 00:58:09 crc kubenswrapper[4791]: I0218 00:58:09.079550 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3546ea0e-00fb-4c3a-a653-54fc9527457b" path="/var/lib/kubelet/pods/3546ea0e-00fb-4c3a-a653-54fc9527457b/volumes" Feb 18 00:58:09 crc kubenswrapper[4791]: I0218 00:58:09.080682 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7c4f2d33-853b-4b26-9050-a95f8a0aacff" path="/var/lib/kubelet/pods/7c4f2d33-853b-4b26-9050-a95f8a0aacff/volumes" Feb 18 00:58:09 crc kubenswrapper[4791]: I0218 00:58:09.083642 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="825d75de-0281-4172-8f86-e2c23e4a818a" path="/var/lib/kubelet/pods/825d75de-0281-4172-8f86-e2c23e4a818a/volumes" Feb 18 00:58:09 crc kubenswrapper[4791]: I0218 00:58:09.084386 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e21c26db-c068-4ee6-be23-2539a847b9d8" path="/var/lib/kubelet/pods/e21c26db-c068-4ee6-be23-2539a847b9d8/volumes" Feb 18 00:58:09 crc kubenswrapper[4791]: I0218 00:58:09.141800 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-568cb6f944-cxt84" Feb 18 00:58:09 crc kubenswrapper[4791]: I0218 00:58:09.144472 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-568cb6f944-cxt84" Feb 18 00:58:09 crc kubenswrapper[4791]: I0218 00:58:09.255961 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-956c96f98-prlnm"] Feb 18 00:58:09 crc kubenswrapper[4791]: I0218 00:58:09.256217 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-956c96f98-prlnm" podUID="0a531661-ab4d-4689-8dd6-a1627232f871" containerName="placement-log" containerID="cri-o://aba8ab6d89e6ccce846772572bb70879600f867434c61a8143a38ae0f130b5f2" gracePeriod=30 Feb 18 00:58:09 crc kubenswrapper[4791]: I0218 00:58:09.256320 4791 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/placement-956c96f98-prlnm" podUID="0a531661-ab4d-4689-8dd6-a1627232f871" containerName="placement-api" containerID="cri-o://44616c0ca617af27909f2cdd1af40974c7443bf98100ca11a1e3b3c5f50a56f9" gracePeriod=30 Feb 18 00:58:09 crc kubenswrapper[4791]: I0218 00:58:09.546535 4791 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-cfnapi-7f4b9bdf54-r459n" Feb 18 00:58:09 crc kubenswrapper[4791]: I0218 00:58:09.546576 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-7f4b9bdf54-r459n" Feb 18 00:58:09 crc kubenswrapper[4791]: I0218 00:58:09.867400 4791 generic.go:334] "Generic (PLEG): container finished" podID="0a531661-ab4d-4689-8dd6-a1627232f871" containerID="aba8ab6d89e6ccce846772572bb70879600f867434c61a8143a38ae0f130b5f2" exitCode=143 Feb 18 00:58:09 crc kubenswrapper[4791]: I0218 00:58:09.867465 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-956c96f98-prlnm" event={"ID":"0a531661-ab4d-4689-8dd6-a1627232f871","Type":"ContainerDied","Data":"aba8ab6d89e6ccce846772572bb70879600f867434c61a8143a38ae0f130b5f2"} Feb 18 00:58:09 crc kubenswrapper[4791]: I0218 00:58:09.871739 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Feb 18 00:58:09 crc kubenswrapper[4791]: I0218 00:58:09.871917 4791 scope.go:117] "RemoveContainer" containerID="9818653ce514504e9b398b2998a0471c5002812e7e0f6d7c5ea536ea7eef7e33" Feb 18 00:58:09 crc kubenswrapper[4791]: I0218 00:58:09.871954 4791 scope.go:117] "RemoveContainer" containerID="c6d5189293d82e7a532d052b5bc004f1e15f18dcdd6071502970727ea050d8bd" Feb 18 00:58:09 crc kubenswrapper[4791]: E0218 00:58:09.872124 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-657c5fbcdb-dphwk_openstack(cea6666e-f991-44f6-bb6f-55cead75043f)\"" pod="openstack/heat-api-657c5fbcdb-dphwk" podUID="cea6666e-f991-44f6-bb6f-55cead75043f" Feb 18 00:58:09 crc kubenswrapper[4791]: E0218 00:58:09.872201 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-7f4b9bdf54-r459n_openstack(81e44d40-acc5-4bae-a4dc-b9dd837fffef)\"" pod="openstack/heat-cfnapi-7f4b9bdf54-r459n" podUID="81e44d40-acc5-4bae-a4dc-b9dd837fffef" Feb 18 00:58:10 crc kubenswrapper[4791]: I0218 00:58:10.046234 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:58:10 crc kubenswrapper[4791]: I0218 00:58:10.287427 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-657c5fbcdb-dphwk" Feb 18 00:58:10 crc kubenswrapper[4791]: I0218 00:58:10.287502 4791 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-api-657c5fbcdb-dphwk" Feb 18 00:58:10 crc kubenswrapper[4791]: I0218 00:58:10.882417 4791 scope.go:117] "RemoveContainer" containerID="9818653ce514504e9b398b2998a0471c5002812e7e0f6d7c5ea536ea7eef7e33" Feb 18 00:58:10 crc kubenswrapper[4791]: E0218 00:58:10.882731 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-657c5fbcdb-dphwk_openstack(cea6666e-f991-44f6-bb6f-55cead75043f)\"" 
pod="openstack/heat-api-657c5fbcdb-dphwk" podUID="cea6666e-f991-44f6-bb6f-55cead75043f" Feb 18 00:58:10 crc kubenswrapper[4791]: I0218 00:58:10.883105 4791 scope.go:117] "RemoveContainer" containerID="c6d5189293d82e7a532d052b5bc004f1e15f18dcdd6071502970727ea050d8bd" Feb 18 00:58:10 crc kubenswrapper[4791]: E0218 00:58:10.883405 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-7f4b9bdf54-r459n_openstack(81e44d40-acc5-4bae-a4dc-b9dd837fffef)\"" pod="openstack/heat-cfnapi-7f4b9bdf54-r459n" podUID="81e44d40-acc5-4bae-a4dc-b9dd837fffef" Feb 18 00:58:10 crc kubenswrapper[4791]: I0218 00:58:10.963891 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-5686f7fc6f-xd9xk" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.152990 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-6jpnb"] Feb 18 00:58:11 crc kubenswrapper[4791]: E0218 00:58:11.153579 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e21c26db-c068-4ee6-be23-2539a847b9d8" containerName="heat-cfnapi" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.153597 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="e21c26db-c068-4ee6-be23-2539a847b9d8" containerName="heat-cfnapi" Feb 18 00:58:11 crc kubenswrapper[4791]: E0218 00:58:11.153615 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0411e3ff-4c35-4daa-9d29-2f8a46e5ee26" containerName="mariadb-account-create-update" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.153622 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="0411e3ff-4c35-4daa-9d29-2f8a46e5ee26" containerName="mariadb-account-create-update" Feb 18 00:58:11 crc kubenswrapper[4791]: E0218 00:58:11.153636 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a90c6dfb-27a9-46ad-9cb8-121d9fb4bee8" containerName="mariadb-database-create" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.153641 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="a90c6dfb-27a9-46ad-9cb8-121d9fb4bee8" containerName="mariadb-database-create" Feb 18 00:58:11 crc kubenswrapper[4791]: E0218 00:58:11.153651 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="825d75de-0281-4172-8f86-e2c23e4a818a" containerName="dnsmasq-dns" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.153657 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="825d75de-0281-4172-8f86-e2c23e4a818a" containerName="dnsmasq-dns" Feb 18 00:58:11 crc kubenswrapper[4791]: E0218 00:58:11.153665 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e5c3f09-b080-4738-a696-f210249c18eb" containerName="mariadb-database-create" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.153671 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e5c3f09-b080-4738-a696-f210249c18eb" containerName="mariadb-database-create" Feb 18 00:58:11 crc kubenswrapper[4791]: E0218 00:58:11.153684 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="825d75de-0281-4172-8f86-e2c23e4a818a" containerName="init" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.153689 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="825d75de-0281-4172-8f86-e2c23e4a818a" containerName="init" Feb 18 00:58:11 crc kubenswrapper[4791]: E0218 00:58:11.153697 4791 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="7c4f2d33-853b-4b26-9050-a95f8a0aacff" containerName="neutron-api" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.153768 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c4f2d33-853b-4b26-9050-a95f8a0aacff" containerName="neutron-api" Feb 18 00:58:11 crc kubenswrapper[4791]: E0218 00:58:11.153791 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6944b18f-146d-47ba-9d71-06d200fa828e" containerName="mariadb-account-create-update" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.153797 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="6944b18f-146d-47ba-9d71-06d200fa828e" containerName="mariadb-account-create-update" Feb 18 00:58:11 crc kubenswrapper[4791]: E0218 00:58:11.153808 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c4f2d33-853b-4b26-9050-a95f8a0aacff" containerName="neutron-httpd" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.153814 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c4f2d33-853b-4b26-9050-a95f8a0aacff" containerName="neutron-httpd" Feb 18 00:58:11 crc kubenswrapper[4791]: E0218 00:58:11.153828 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82100e2b-cadd-4a88-9599-2f0932deacce" containerName="mariadb-database-create" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.153833 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="82100e2b-cadd-4a88-9599-2f0932deacce" containerName="mariadb-database-create" Feb 18 00:58:11 crc kubenswrapper[4791]: E0218 00:58:11.153847 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2cbbe657-958b-4c43-a636-e04ac880613d" containerName="mariadb-account-create-update" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.153852 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="2cbbe657-958b-4c43-a636-e04ac880613d" containerName="mariadb-account-create-update" Feb 18 00:58:11 crc kubenswrapper[4791]: E0218 00:58:11.153862 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3546ea0e-00fb-4c3a-a653-54fc9527457b" containerName="heat-api" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.153869 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="3546ea0e-00fb-4c3a-a653-54fc9527457b" containerName="heat-api" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.154063 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c4f2d33-853b-4b26-9050-a95f8a0aacff" containerName="neutron-httpd" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.154075 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e5c3f09-b080-4738-a696-f210249c18eb" containerName="mariadb-database-create" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.154085 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="825d75de-0281-4172-8f86-e2c23e4a818a" containerName="dnsmasq-dns" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.154094 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="3546ea0e-00fb-4c3a-a653-54fc9527457b" containerName="heat-api" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.154106 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="2cbbe657-958b-4c43-a636-e04ac880613d" containerName="mariadb-account-create-update" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.154120 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="82100e2b-cadd-4a88-9599-2f0932deacce" 
containerName="mariadb-database-create" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.154133 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="6944b18f-146d-47ba-9d71-06d200fa828e" containerName="mariadb-account-create-update" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.154142 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="a90c6dfb-27a9-46ad-9cb8-121d9fb4bee8" containerName="mariadb-database-create" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.154176 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="0411e3ff-4c35-4daa-9d29-2f8a46e5ee26" containerName="mariadb-account-create-update" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.154189 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="e21c26db-c068-4ee6-be23-2539a847b9d8" containerName="heat-cfnapi" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.154196 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c4f2d33-853b-4b26-9050-a95f8a0aacff" containerName="neutron-api" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.166364 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-6jpnb" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.169058 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.169230 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.169360 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-cpczt" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.173961 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-6jpnb"] Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.278103 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5tgs9\" (UniqueName: \"kubernetes.io/projected/f1d1a31c-1309-4888-8122-16d6be151b19-kube-api-access-5tgs9\") pod \"nova-cell0-conductor-db-sync-6jpnb\" (UID: \"f1d1a31c-1309-4888-8122-16d6be151b19\") " pod="openstack/nova-cell0-conductor-db-sync-6jpnb" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.278333 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1d1a31c-1309-4888-8122-16d6be151b19-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-6jpnb\" (UID: \"f1d1a31c-1309-4888-8122-16d6be151b19\") " pod="openstack/nova-cell0-conductor-db-sync-6jpnb" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.278456 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1d1a31c-1309-4888-8122-16d6be151b19-config-data\") pod \"nova-cell0-conductor-db-sync-6jpnb\" (UID: \"f1d1a31c-1309-4888-8122-16d6be151b19\") " pod="openstack/nova-cell0-conductor-db-sync-6jpnb" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.278715 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f1d1a31c-1309-4888-8122-16d6be151b19-scripts\") pod \"nova-cell0-conductor-db-sync-6jpnb\" (UID: 
\"f1d1a31c-1309-4888-8122-16d6be151b19\") " pod="openstack/nova-cell0-conductor-db-sync-6jpnb" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.380633 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f1d1a31c-1309-4888-8122-16d6be151b19-scripts\") pod \"nova-cell0-conductor-db-sync-6jpnb\" (UID: \"f1d1a31c-1309-4888-8122-16d6be151b19\") " pod="openstack/nova-cell0-conductor-db-sync-6jpnb" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.380959 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5tgs9\" (UniqueName: \"kubernetes.io/projected/f1d1a31c-1309-4888-8122-16d6be151b19-kube-api-access-5tgs9\") pod \"nova-cell0-conductor-db-sync-6jpnb\" (UID: \"f1d1a31c-1309-4888-8122-16d6be151b19\") " pod="openstack/nova-cell0-conductor-db-sync-6jpnb" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.381117 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1d1a31c-1309-4888-8122-16d6be151b19-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-6jpnb\" (UID: \"f1d1a31c-1309-4888-8122-16d6be151b19\") " pod="openstack/nova-cell0-conductor-db-sync-6jpnb" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.381248 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1d1a31c-1309-4888-8122-16d6be151b19-config-data\") pod \"nova-cell0-conductor-db-sync-6jpnb\" (UID: \"f1d1a31c-1309-4888-8122-16d6be151b19\") " pod="openstack/nova-cell0-conductor-db-sync-6jpnb" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.387460 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1d1a31c-1309-4888-8122-16d6be151b19-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-6jpnb\" (UID: \"f1d1a31c-1309-4888-8122-16d6be151b19\") " pod="openstack/nova-cell0-conductor-db-sync-6jpnb" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.405892 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1d1a31c-1309-4888-8122-16d6be151b19-config-data\") pod \"nova-cell0-conductor-db-sync-6jpnb\" (UID: \"f1d1a31c-1309-4888-8122-16d6be151b19\") " pod="openstack/nova-cell0-conductor-db-sync-6jpnb" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.406689 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f1d1a31c-1309-4888-8122-16d6be151b19-scripts\") pod \"nova-cell0-conductor-db-sync-6jpnb\" (UID: \"f1d1a31c-1309-4888-8122-16d6be151b19\") " pod="openstack/nova-cell0-conductor-db-sync-6jpnb" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.408222 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5tgs9\" (UniqueName: \"kubernetes.io/projected/f1d1a31c-1309-4888-8122-16d6be151b19-kube-api-access-5tgs9\") pod \"nova-cell0-conductor-db-sync-6jpnb\" (UID: \"f1d1a31c-1309-4888-8122-16d6be151b19\") " pod="openstack/nova-cell0-conductor-db-sync-6jpnb" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.487466 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-6jpnb" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.890928 4791 scope.go:117] "RemoveContainer" containerID="9818653ce514504e9b398b2998a0471c5002812e7e0f6d7c5ea536ea7eef7e33" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.891460 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6fd9dd05-384e-406a-b195-af283d2807ad" containerName="ceilometer-central-agent" containerID="cri-o://a20e7c0385c4910a664b018fd457ccf6d55a99bbc6835bee15b294024c8dcbdb" gracePeriod=30 Feb 18 00:58:11 crc kubenswrapper[4791]: E0218 00:58:11.891685 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-657c5fbcdb-dphwk_openstack(cea6666e-f991-44f6-bb6f-55cead75043f)\"" pod="openstack/heat-api-657c5fbcdb-dphwk" podUID="cea6666e-f991-44f6-bb6f-55cead75043f" Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.891804 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6fd9dd05-384e-406a-b195-af283d2807ad" containerName="proxy-httpd" containerID="cri-o://65d1eba548bbfd6e1b79675061cb46b7a576d3c275f2e3feedff804c24c8ec9f" gracePeriod=30 Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.891861 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6fd9dd05-384e-406a-b195-af283d2807ad" containerName="sg-core" containerID="cri-o://77d43c7a2a13ea39ea539167017369c55e1fdc1f07b5bdeaa157f1cca0bd85b1" gracePeriod=30 Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.891896 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6fd9dd05-384e-406a-b195-af283d2807ad" containerName="ceilometer-notification-agent" containerID="cri-o://d4f50ae34439c375f60cdffc235c43e058a66e829badc907c639166e3aeb44ee" gracePeriod=30 Feb 18 00:58:11 crc kubenswrapper[4791]: W0218 00:58:11.960703 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf1d1a31c_1309_4888_8122_16d6be151b19.slice/crio-a98ad8fec5c17f8fff2263bf70bf321a46baa4608892afcf8d7e2bdaa389086d WatchSource:0}: Error finding container a98ad8fec5c17f8fff2263bf70bf321a46baa4608892afcf8d7e2bdaa389086d: Status 404 returned error can't find the container with id a98ad8fec5c17f8fff2263bf70bf321a46baa4608892afcf8d7e2bdaa389086d Feb 18 00:58:11 crc kubenswrapper[4791]: I0218 00:58:11.965862 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-6jpnb"] Feb 18 00:58:12 crc kubenswrapper[4791]: I0218 00:58:12.922351 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-6jpnb" event={"ID":"f1d1a31c-1309-4888-8122-16d6be151b19","Type":"ContainerStarted","Data":"a98ad8fec5c17f8fff2263bf70bf321a46baa4608892afcf8d7e2bdaa389086d"} Feb 18 00:58:12 crc kubenswrapper[4791]: I0218 00:58:12.930032 4791 generic.go:334] "Generic (PLEG): container finished" podID="0a531661-ab4d-4689-8dd6-a1627232f871" containerID="44616c0ca617af27909f2cdd1af40974c7443bf98100ca11a1e3b3c5f50a56f9" exitCode=0 Feb 18 00:58:12 crc kubenswrapper[4791]: I0218 00:58:12.930099 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-956c96f98-prlnm" 
event={"ID":"0a531661-ab4d-4689-8dd6-a1627232f871","Type":"ContainerDied","Data":"44616c0ca617af27909f2cdd1af40974c7443bf98100ca11a1e3b3c5f50a56f9"} Feb 18 00:58:12 crc kubenswrapper[4791]: I0218 00:58:12.934196 4791 generic.go:334] "Generic (PLEG): container finished" podID="6fd9dd05-384e-406a-b195-af283d2807ad" containerID="65d1eba548bbfd6e1b79675061cb46b7a576d3c275f2e3feedff804c24c8ec9f" exitCode=0 Feb 18 00:58:12 crc kubenswrapper[4791]: I0218 00:58:12.934227 4791 generic.go:334] "Generic (PLEG): container finished" podID="6fd9dd05-384e-406a-b195-af283d2807ad" containerID="77d43c7a2a13ea39ea539167017369c55e1fdc1f07b5bdeaa157f1cca0bd85b1" exitCode=2 Feb 18 00:58:12 crc kubenswrapper[4791]: I0218 00:58:12.934240 4791 generic.go:334] "Generic (PLEG): container finished" podID="6fd9dd05-384e-406a-b195-af283d2807ad" containerID="d4f50ae34439c375f60cdffc235c43e058a66e829badc907c639166e3aeb44ee" exitCode=0 Feb 18 00:58:12 crc kubenswrapper[4791]: I0218 00:58:12.934234 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6fd9dd05-384e-406a-b195-af283d2807ad","Type":"ContainerDied","Data":"65d1eba548bbfd6e1b79675061cb46b7a576d3c275f2e3feedff804c24c8ec9f"} Feb 18 00:58:12 crc kubenswrapper[4791]: I0218 00:58:12.934290 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6fd9dd05-384e-406a-b195-af283d2807ad","Type":"ContainerDied","Data":"77d43c7a2a13ea39ea539167017369c55e1fdc1f07b5bdeaa157f1cca0bd85b1"} Feb 18 00:58:12 crc kubenswrapper[4791]: I0218 00:58:12.934305 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6fd9dd05-384e-406a-b195-af283d2807ad","Type":"ContainerDied","Data":"d4f50ae34439c375f60cdffc235c43e058a66e829badc907c639166e3aeb44ee"} Feb 18 00:58:13 crc kubenswrapper[4791]: I0218 00:58:13.072975 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-956c96f98-prlnm" Feb 18 00:58:13 crc kubenswrapper[4791]: I0218 00:58:13.228688 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a531661-ab4d-4689-8dd6-a1627232f871-config-data\") pod \"0a531661-ab4d-4689-8dd6-a1627232f871\" (UID: \"0a531661-ab4d-4689-8dd6-a1627232f871\") " Feb 18 00:58:13 crc kubenswrapper[4791]: I0218 00:58:13.228793 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a531661-ab4d-4689-8dd6-a1627232f871-combined-ca-bundle\") pod \"0a531661-ab4d-4689-8dd6-a1627232f871\" (UID: \"0a531661-ab4d-4689-8dd6-a1627232f871\") " Feb 18 00:58:13 crc kubenswrapper[4791]: I0218 00:58:13.228849 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wf5l2\" (UniqueName: \"kubernetes.io/projected/0a531661-ab4d-4689-8dd6-a1627232f871-kube-api-access-wf5l2\") pod \"0a531661-ab4d-4689-8dd6-a1627232f871\" (UID: \"0a531661-ab4d-4689-8dd6-a1627232f871\") " Feb 18 00:58:13 crc kubenswrapper[4791]: I0218 00:58:13.228947 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a531661-ab4d-4689-8dd6-a1627232f871-internal-tls-certs\") pod \"0a531661-ab4d-4689-8dd6-a1627232f871\" (UID: \"0a531661-ab4d-4689-8dd6-a1627232f871\") " Feb 18 00:58:13 crc kubenswrapper[4791]: I0218 00:58:13.229060 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a531661-ab4d-4689-8dd6-a1627232f871-logs\") pod \"0a531661-ab4d-4689-8dd6-a1627232f871\" (UID: \"0a531661-ab4d-4689-8dd6-a1627232f871\") " Feb 18 00:58:13 crc kubenswrapper[4791]: I0218 00:58:13.229128 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a531661-ab4d-4689-8dd6-a1627232f871-public-tls-certs\") pod \"0a531661-ab4d-4689-8dd6-a1627232f871\" (UID: \"0a531661-ab4d-4689-8dd6-a1627232f871\") " Feb 18 00:58:13 crc kubenswrapper[4791]: I0218 00:58:13.229193 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a531661-ab4d-4689-8dd6-a1627232f871-scripts\") pod \"0a531661-ab4d-4689-8dd6-a1627232f871\" (UID: \"0a531661-ab4d-4689-8dd6-a1627232f871\") " Feb 18 00:58:13 crc kubenswrapper[4791]: I0218 00:58:13.229845 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a531661-ab4d-4689-8dd6-a1627232f871-logs" (OuterVolumeSpecName: "logs") pod "0a531661-ab4d-4689-8dd6-a1627232f871" (UID: "0a531661-ab4d-4689-8dd6-a1627232f871"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:58:13 crc kubenswrapper[4791]: I0218 00:58:13.230081 4791 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a531661-ab4d-4689-8dd6-a1627232f871-logs\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:13 crc kubenswrapper[4791]: I0218 00:58:13.235583 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a531661-ab4d-4689-8dd6-a1627232f871-scripts" (OuterVolumeSpecName: "scripts") pod "0a531661-ab4d-4689-8dd6-a1627232f871" (UID: "0a531661-ab4d-4689-8dd6-a1627232f871"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:13 crc kubenswrapper[4791]: I0218 00:58:13.256652 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a531661-ab4d-4689-8dd6-a1627232f871-kube-api-access-wf5l2" (OuterVolumeSpecName: "kube-api-access-wf5l2") pod "0a531661-ab4d-4689-8dd6-a1627232f871" (UID: "0a531661-ab4d-4689-8dd6-a1627232f871"). InnerVolumeSpecName "kube-api-access-wf5l2". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:58:13 crc kubenswrapper[4791]: I0218 00:58:13.289891 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a531661-ab4d-4689-8dd6-a1627232f871-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0a531661-ab4d-4689-8dd6-a1627232f871" (UID: "0a531661-ab4d-4689-8dd6-a1627232f871"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:13 crc kubenswrapper[4791]: I0218 00:58:13.312202 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a531661-ab4d-4689-8dd6-a1627232f871-config-data" (OuterVolumeSpecName: "config-data") pod "0a531661-ab4d-4689-8dd6-a1627232f871" (UID: "0a531661-ab4d-4689-8dd6-a1627232f871"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:13 crc kubenswrapper[4791]: I0218 00:58:13.332904 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wf5l2\" (UniqueName: \"kubernetes.io/projected/0a531661-ab4d-4689-8dd6-a1627232f871-kube-api-access-wf5l2\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:13 crc kubenswrapper[4791]: I0218 00:58:13.332938 4791 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a531661-ab4d-4689-8dd6-a1627232f871-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:13 crc kubenswrapper[4791]: I0218 00:58:13.332948 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a531661-ab4d-4689-8dd6-a1627232f871-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:13 crc kubenswrapper[4791]: I0218 00:58:13.332957 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a531661-ab4d-4689-8dd6-a1627232f871-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:13 crc kubenswrapper[4791]: I0218 00:58:13.401796 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a531661-ab4d-4689-8dd6-a1627232f871-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "0a531661-ab4d-4689-8dd6-a1627232f871" (UID: "0a531661-ab4d-4689-8dd6-a1627232f871"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:13 crc kubenswrapper[4791]: I0218 00:58:13.415263 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a531661-ab4d-4689-8dd6-a1627232f871-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "0a531661-ab4d-4689-8dd6-a1627232f871" (UID: "0a531661-ab4d-4689-8dd6-a1627232f871"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:13 crc kubenswrapper[4791]: I0218 00:58:13.434961 4791 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a531661-ab4d-4689-8dd6-a1627232f871-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:13 crc kubenswrapper[4791]: I0218 00:58:13.434986 4791 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a531661-ab4d-4689-8dd6-a1627232f871-public-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:13 crc kubenswrapper[4791]: I0218 00:58:13.947522 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-956c96f98-prlnm" event={"ID":"0a531661-ab4d-4689-8dd6-a1627232f871","Type":"ContainerDied","Data":"6bb7b85fa984f0e83badbe2cb98f555dd541551bc3359960fd440a4959689036"} Feb 18 00:58:13 crc kubenswrapper[4791]: I0218 00:58:13.947921 4791 scope.go:117] "RemoveContainer" containerID="44616c0ca617af27909f2cdd1af40974c7443bf98100ca11a1e3b3c5f50a56f9" Feb 18 00:58:13 crc kubenswrapper[4791]: I0218 00:58:13.947588 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-956c96f98-prlnm" Feb 18 00:58:13 crc kubenswrapper[4791]: I0218 00:58:13.980268 4791 scope.go:117] "RemoveContainer" containerID="aba8ab6d89e6ccce846772572bb70879600f867434c61a8143a38ae0f130b5f2" Feb 18 00:58:13 crc kubenswrapper[4791]: I0218 00:58:13.989487 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-956c96f98-prlnm"] Feb 18 00:58:14 crc kubenswrapper[4791]: I0218 00:58:14.002913 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-956c96f98-prlnm"] Feb 18 00:58:14 crc kubenswrapper[4791]: I0218 00:58:14.417003 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-7d9d7f9648-qr5lg" Feb 18 00:58:14 crc kubenswrapper[4791]: I0218 00:58:14.461686 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-7b5c5fc9b-vzf74" Feb 18 00:58:14 crc kubenswrapper[4791]: I0218 00:58:14.576899 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-657c5fbcdb-dphwk"] Feb 18 00:58:14 crc kubenswrapper[4791]: I0218 00:58:14.637256 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-7f4b9bdf54-r459n"] Feb 18 00:58:14 crc kubenswrapper[4791]: I0218 00:58:14.715038 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 18 00:58:14 crc kubenswrapper[4791]: I0218 00:58:14.880044 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6fd9dd05-384e-406a-b195-af283d2807ad-scripts\") pod \"6fd9dd05-384e-406a-b195-af283d2807ad\" (UID: \"6fd9dd05-384e-406a-b195-af283d2807ad\") " Feb 18 00:58:14 crc kubenswrapper[4791]: I0218 00:58:14.880095 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fd9dd05-384e-406a-b195-af283d2807ad-combined-ca-bundle\") pod \"6fd9dd05-384e-406a-b195-af283d2807ad\" (UID: \"6fd9dd05-384e-406a-b195-af283d2807ad\") " Feb 18 00:58:14 crc kubenswrapper[4791]: I0218 00:58:14.880288 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6fd9dd05-384e-406a-b195-af283d2807ad-config-data\") pod \"6fd9dd05-384e-406a-b195-af283d2807ad\" (UID: \"6fd9dd05-384e-406a-b195-af283d2807ad\") " Feb 18 00:58:14 crc kubenswrapper[4791]: I0218 00:58:14.880321 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6fd9dd05-384e-406a-b195-af283d2807ad-log-httpd\") pod \"6fd9dd05-384e-406a-b195-af283d2807ad\" (UID: \"6fd9dd05-384e-406a-b195-af283d2807ad\") " Feb 18 00:58:14 crc kubenswrapper[4791]: I0218 00:58:14.880400 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6fd9dd05-384e-406a-b195-af283d2807ad-sg-core-conf-yaml\") pod \"6fd9dd05-384e-406a-b195-af283d2807ad\" (UID: \"6fd9dd05-384e-406a-b195-af283d2807ad\") " Feb 18 00:58:14 crc kubenswrapper[4791]: I0218 00:58:14.880434 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5tg7n\" (UniqueName: \"kubernetes.io/projected/6fd9dd05-384e-406a-b195-af283d2807ad-kube-api-access-5tg7n\") pod \"6fd9dd05-384e-406a-b195-af283d2807ad\" (UID: \"6fd9dd05-384e-406a-b195-af283d2807ad\") " Feb 18 00:58:14 crc kubenswrapper[4791]: I0218 00:58:14.880483 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6fd9dd05-384e-406a-b195-af283d2807ad-run-httpd\") pod \"6fd9dd05-384e-406a-b195-af283d2807ad\" (UID: \"6fd9dd05-384e-406a-b195-af283d2807ad\") " Feb 18 00:58:14 crc kubenswrapper[4791]: I0218 00:58:14.881463 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6fd9dd05-384e-406a-b195-af283d2807ad-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "6fd9dd05-384e-406a-b195-af283d2807ad" (UID: "6fd9dd05-384e-406a-b195-af283d2807ad"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:58:14 crc kubenswrapper[4791]: I0218 00:58:14.881785 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6fd9dd05-384e-406a-b195-af283d2807ad-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "6fd9dd05-384e-406a-b195-af283d2807ad" (UID: "6fd9dd05-384e-406a-b195-af283d2807ad"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:58:14 crc kubenswrapper[4791]: I0218 00:58:14.899238 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6fd9dd05-384e-406a-b195-af283d2807ad-scripts" (OuterVolumeSpecName: "scripts") pod "6fd9dd05-384e-406a-b195-af283d2807ad" (UID: "6fd9dd05-384e-406a-b195-af283d2807ad"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:14 crc kubenswrapper[4791]: I0218 00:58:14.925504 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6fd9dd05-384e-406a-b195-af283d2807ad-kube-api-access-5tg7n" (OuterVolumeSpecName: "kube-api-access-5tg7n") pod "6fd9dd05-384e-406a-b195-af283d2807ad" (UID: "6fd9dd05-384e-406a-b195-af283d2807ad"). InnerVolumeSpecName "kube-api-access-5tg7n". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:58:14 crc kubenswrapper[4791]: I0218 00:58:14.952418 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6fd9dd05-384e-406a-b195-af283d2807ad-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "6fd9dd05-384e-406a-b195-af283d2807ad" (UID: "6fd9dd05-384e-406a-b195-af283d2807ad"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:14 crc kubenswrapper[4791]: I0218 00:58:14.985553 4791 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6fd9dd05-384e-406a-b195-af283d2807ad-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:14 crc kubenswrapper[4791]: I0218 00:58:14.985585 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5tg7n\" (UniqueName: \"kubernetes.io/projected/6fd9dd05-384e-406a-b195-af283d2807ad-kube-api-access-5tg7n\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:14 crc kubenswrapper[4791]: I0218 00:58:14.985597 4791 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6fd9dd05-384e-406a-b195-af283d2807ad-run-httpd\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:14 crc kubenswrapper[4791]: I0218 00:58:14.985605 4791 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6fd9dd05-384e-406a-b195-af283d2807ad-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:14 crc kubenswrapper[4791]: I0218 00:58:14.985614 4791 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6fd9dd05-384e-406a-b195-af283d2807ad-log-httpd\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.013746 4791 generic.go:334] "Generic (PLEG): container finished" podID="6fd9dd05-384e-406a-b195-af283d2807ad" containerID="a20e7c0385c4910a664b018fd457ccf6d55a99bbc6835bee15b294024c8dcbdb" exitCode=0 Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.013786 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6fd9dd05-384e-406a-b195-af283d2807ad","Type":"ContainerDied","Data":"a20e7c0385c4910a664b018fd457ccf6d55a99bbc6835bee15b294024c8dcbdb"} Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.013811 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6fd9dd05-384e-406a-b195-af283d2807ad","Type":"ContainerDied","Data":"5da4b8347d7a7e0943e4134961ec84b0da35a8fa5734fe6285c26a4e0efe9b94"} Feb 18 
00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.013829 4791 scope.go:117] "RemoveContainer" containerID="65d1eba548bbfd6e1b79675061cb46b7a576d3c275f2e3feedff804c24c8ec9f" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.013971 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.054597 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6fd9dd05-384e-406a-b195-af283d2807ad-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6fd9dd05-384e-406a-b195-af283d2807ad" (UID: "6fd9dd05-384e-406a-b195-af283d2807ad"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.088100 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fd9dd05-384e-406a-b195-af283d2807ad-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.090642 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7f4b9bdf54-r459n" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.099514 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a531661-ab4d-4689-8dd6-a1627232f871" path="/var/lib/kubelet/pods/0a531661-ab4d-4689-8dd6-a1627232f871/volumes" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.108027 4791 scope.go:117] "RemoveContainer" containerID="77d43c7a2a13ea39ea539167017369c55e1fdc1f07b5bdeaa157f1cca0bd85b1" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.111332 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-657c5fbcdb-dphwk" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.151251 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6fd9dd05-384e-406a-b195-af283d2807ad-config-data" (OuterVolumeSpecName: "config-data") pod "6fd9dd05-384e-406a-b195-af283d2807ad" (UID: "6fd9dd05-384e-406a-b195-af283d2807ad"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.182858 4791 scope.go:117] "RemoveContainer" containerID="d4f50ae34439c375f60cdffc235c43e058a66e829badc907c639166e3aeb44ee" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.188870 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81e44d40-acc5-4bae-a4dc-b9dd837fffef-combined-ca-bundle\") pod \"81e44d40-acc5-4bae-a4dc-b9dd837fffef\" (UID: \"81e44d40-acc5-4bae-a4dc-b9dd837fffef\") " Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.188977 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/81e44d40-acc5-4bae-a4dc-b9dd837fffef-config-data-custom\") pod \"81e44d40-acc5-4bae-a4dc-b9dd837fffef\" (UID: \"81e44d40-acc5-4bae-a4dc-b9dd837fffef\") " Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.189190 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8ksbb\" (UniqueName: \"kubernetes.io/projected/81e44d40-acc5-4bae-a4dc-b9dd837fffef-kube-api-access-8ksbb\") pod \"81e44d40-acc5-4bae-a4dc-b9dd837fffef\" (UID: \"81e44d40-acc5-4bae-a4dc-b9dd837fffef\") " Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.189210 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81e44d40-acc5-4bae-a4dc-b9dd837fffef-config-data\") pod \"81e44d40-acc5-4bae-a4dc-b9dd837fffef\" (UID: \"81e44d40-acc5-4bae-a4dc-b9dd837fffef\") " Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.189811 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6fd9dd05-384e-406a-b195-af283d2807ad-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.192362 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81e44d40-acc5-4bae-a4dc-b9dd837fffef-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "81e44d40-acc5-4bae-a4dc-b9dd837fffef" (UID: "81e44d40-acc5-4bae-a4dc-b9dd837fffef"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.193044 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81e44d40-acc5-4bae-a4dc-b9dd837fffef-kube-api-access-8ksbb" (OuterVolumeSpecName: "kube-api-access-8ksbb") pod "81e44d40-acc5-4bae-a4dc-b9dd837fffef" (UID: "81e44d40-acc5-4bae-a4dc-b9dd837fffef"). InnerVolumeSpecName "kube-api-access-8ksbb". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.210891 4791 scope.go:117] "RemoveContainer" containerID="a20e7c0385c4910a664b018fd457ccf6d55a99bbc6835bee15b294024c8dcbdb" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.219727 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81e44d40-acc5-4bae-a4dc-b9dd837fffef-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "81e44d40-acc5-4bae-a4dc-b9dd837fffef" (UID: "81e44d40-acc5-4bae-a4dc-b9dd837fffef"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.236830 4791 scope.go:117] "RemoveContainer" containerID="65d1eba548bbfd6e1b79675061cb46b7a576d3c275f2e3feedff804c24c8ec9f" Feb 18 00:58:15 crc kubenswrapper[4791]: E0218 00:58:15.237783 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"65d1eba548bbfd6e1b79675061cb46b7a576d3c275f2e3feedff804c24c8ec9f\": container with ID starting with 65d1eba548bbfd6e1b79675061cb46b7a576d3c275f2e3feedff804c24c8ec9f not found: ID does not exist" containerID="65d1eba548bbfd6e1b79675061cb46b7a576d3c275f2e3feedff804c24c8ec9f" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.237815 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"65d1eba548bbfd6e1b79675061cb46b7a576d3c275f2e3feedff804c24c8ec9f"} err="failed to get container status \"65d1eba548bbfd6e1b79675061cb46b7a576d3c275f2e3feedff804c24c8ec9f\": rpc error: code = NotFound desc = could not find container \"65d1eba548bbfd6e1b79675061cb46b7a576d3c275f2e3feedff804c24c8ec9f\": container with ID starting with 65d1eba548bbfd6e1b79675061cb46b7a576d3c275f2e3feedff804c24c8ec9f not found: ID does not exist" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.237857 4791 scope.go:117] "RemoveContainer" containerID="77d43c7a2a13ea39ea539167017369c55e1fdc1f07b5bdeaa157f1cca0bd85b1" Feb 18 00:58:15 crc kubenswrapper[4791]: E0218 00:58:15.238305 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"77d43c7a2a13ea39ea539167017369c55e1fdc1f07b5bdeaa157f1cca0bd85b1\": container with ID starting with 77d43c7a2a13ea39ea539167017369c55e1fdc1f07b5bdeaa157f1cca0bd85b1 not found: ID does not exist" containerID="77d43c7a2a13ea39ea539167017369c55e1fdc1f07b5bdeaa157f1cca0bd85b1" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.238348 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"77d43c7a2a13ea39ea539167017369c55e1fdc1f07b5bdeaa157f1cca0bd85b1"} err="failed to get container status \"77d43c7a2a13ea39ea539167017369c55e1fdc1f07b5bdeaa157f1cca0bd85b1\": rpc error: code = NotFound desc = could not find container \"77d43c7a2a13ea39ea539167017369c55e1fdc1f07b5bdeaa157f1cca0bd85b1\": container with ID starting with 77d43c7a2a13ea39ea539167017369c55e1fdc1f07b5bdeaa157f1cca0bd85b1 not found: ID does not exist" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.238375 4791 scope.go:117] "RemoveContainer" containerID="d4f50ae34439c375f60cdffc235c43e058a66e829badc907c639166e3aeb44ee" Feb 18 00:58:15 crc kubenswrapper[4791]: E0218 00:58:15.238745 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d4f50ae34439c375f60cdffc235c43e058a66e829badc907c639166e3aeb44ee\": container with ID starting with d4f50ae34439c375f60cdffc235c43e058a66e829badc907c639166e3aeb44ee not found: ID does not exist" containerID="d4f50ae34439c375f60cdffc235c43e058a66e829badc907c639166e3aeb44ee" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.238770 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d4f50ae34439c375f60cdffc235c43e058a66e829badc907c639166e3aeb44ee"} err="failed to get container status \"d4f50ae34439c375f60cdffc235c43e058a66e829badc907c639166e3aeb44ee\": rpc error: code = NotFound desc = could not 
find container \"d4f50ae34439c375f60cdffc235c43e058a66e829badc907c639166e3aeb44ee\": container with ID starting with d4f50ae34439c375f60cdffc235c43e058a66e829badc907c639166e3aeb44ee not found: ID does not exist" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.238785 4791 scope.go:117] "RemoveContainer" containerID="a20e7c0385c4910a664b018fd457ccf6d55a99bbc6835bee15b294024c8dcbdb" Feb 18 00:58:15 crc kubenswrapper[4791]: E0218 00:58:15.239079 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a20e7c0385c4910a664b018fd457ccf6d55a99bbc6835bee15b294024c8dcbdb\": container with ID starting with a20e7c0385c4910a664b018fd457ccf6d55a99bbc6835bee15b294024c8dcbdb not found: ID does not exist" containerID="a20e7c0385c4910a664b018fd457ccf6d55a99bbc6835bee15b294024c8dcbdb" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.239103 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a20e7c0385c4910a664b018fd457ccf6d55a99bbc6835bee15b294024c8dcbdb"} err="failed to get container status \"a20e7c0385c4910a664b018fd457ccf6d55a99bbc6835bee15b294024c8dcbdb\": rpc error: code = NotFound desc = could not find container \"a20e7c0385c4910a664b018fd457ccf6d55a99bbc6835bee15b294024c8dcbdb\": container with ID starting with a20e7c0385c4910a664b018fd457ccf6d55a99bbc6835bee15b294024c8dcbdb not found: ID does not exist" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.243462 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81e44d40-acc5-4bae-a4dc-b9dd837fffef-config-data" (OuterVolumeSpecName: "config-data") pod "81e44d40-acc5-4bae-a4dc-b9dd837fffef" (UID: "81e44d40-acc5-4bae-a4dc-b9dd837fffef"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.291545 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cea6666e-f991-44f6-bb6f-55cead75043f-config-data\") pod \"cea6666e-f991-44f6-bb6f-55cead75043f\" (UID: \"cea6666e-f991-44f6-bb6f-55cead75043f\") " Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.291666 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cea6666e-f991-44f6-bb6f-55cead75043f-config-data-custom\") pod \"cea6666e-f991-44f6-bb6f-55cead75043f\" (UID: \"cea6666e-f991-44f6-bb6f-55cead75043f\") " Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.291743 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5kntn\" (UniqueName: \"kubernetes.io/projected/cea6666e-f991-44f6-bb6f-55cead75043f-kube-api-access-5kntn\") pod \"cea6666e-f991-44f6-bb6f-55cead75043f\" (UID: \"cea6666e-f991-44f6-bb6f-55cead75043f\") " Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.291866 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cea6666e-f991-44f6-bb6f-55cead75043f-combined-ca-bundle\") pod \"cea6666e-f991-44f6-bb6f-55cead75043f\" (UID: \"cea6666e-f991-44f6-bb6f-55cead75043f\") " Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.292570 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8ksbb\" (UniqueName: \"kubernetes.io/projected/81e44d40-acc5-4bae-a4dc-b9dd837fffef-kube-api-access-8ksbb\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.292593 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81e44d40-acc5-4bae-a4dc-b9dd837fffef-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.292602 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81e44d40-acc5-4bae-a4dc-b9dd837fffef-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.292611 4791 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/81e44d40-acc5-4bae-a4dc-b9dd837fffef-config-data-custom\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.295306 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cea6666e-f991-44f6-bb6f-55cead75043f-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "cea6666e-f991-44f6-bb6f-55cead75043f" (UID: "cea6666e-f991-44f6-bb6f-55cead75043f"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.295561 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cea6666e-f991-44f6-bb6f-55cead75043f-kube-api-access-5kntn" (OuterVolumeSpecName: "kube-api-access-5kntn") pod "cea6666e-f991-44f6-bb6f-55cead75043f" (UID: "cea6666e-f991-44f6-bb6f-55cead75043f"). InnerVolumeSpecName "kube-api-access-5kntn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.331742 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cea6666e-f991-44f6-bb6f-55cead75043f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cea6666e-f991-44f6-bb6f-55cead75043f" (UID: "cea6666e-f991-44f6-bb6f-55cead75043f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.369416 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.380641 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cea6666e-f991-44f6-bb6f-55cead75043f-config-data" (OuterVolumeSpecName: "config-data") pod "cea6666e-f991-44f6-bb6f-55cead75043f" (UID: "cea6666e-f991-44f6-bb6f-55cead75043f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.381889 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.395822 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5kntn\" (UniqueName: \"kubernetes.io/projected/cea6666e-f991-44f6-bb6f-55cead75043f-kube-api-access-5kntn\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.395860 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cea6666e-f991-44f6-bb6f-55cead75043f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.395871 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cea6666e-f991-44f6-bb6f-55cead75043f-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.395880 4791 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cea6666e-f991-44f6-bb6f-55cead75043f-config-data-custom\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.404579 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:58:15 crc kubenswrapper[4791]: E0218 00:58:15.405216 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a531661-ab4d-4689-8dd6-a1627232f871" containerName="placement-log" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.405239 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a531661-ab4d-4689-8dd6-a1627232f871" containerName="placement-log" Feb 18 00:58:15 crc kubenswrapper[4791]: E0218 00:58:15.405251 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a531661-ab4d-4689-8dd6-a1627232f871" containerName="placement-api" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.405260 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a531661-ab4d-4689-8dd6-a1627232f871" containerName="placement-api" Feb 18 00:58:15 crc kubenswrapper[4791]: E0218 00:58:15.405271 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fd9dd05-384e-406a-b195-af283d2807ad" containerName="proxy-httpd" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.405278 4791 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="6fd9dd05-384e-406a-b195-af283d2807ad" containerName="proxy-httpd" Feb 18 00:58:15 crc kubenswrapper[4791]: E0218 00:58:15.405297 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fd9dd05-384e-406a-b195-af283d2807ad" containerName="sg-core" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.405305 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fd9dd05-384e-406a-b195-af283d2807ad" containerName="sg-core" Feb 18 00:58:15 crc kubenswrapper[4791]: E0218 00:58:15.405318 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fd9dd05-384e-406a-b195-af283d2807ad" containerName="ceilometer-central-agent" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.405328 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fd9dd05-384e-406a-b195-af283d2807ad" containerName="ceilometer-central-agent" Feb 18 00:58:15 crc kubenswrapper[4791]: E0218 00:58:15.405351 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81e44d40-acc5-4bae-a4dc-b9dd837fffef" containerName="heat-cfnapi" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.405359 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="81e44d40-acc5-4bae-a4dc-b9dd837fffef" containerName="heat-cfnapi" Feb 18 00:58:15 crc kubenswrapper[4791]: E0218 00:58:15.405369 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cea6666e-f991-44f6-bb6f-55cead75043f" containerName="heat-api" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.405378 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="cea6666e-f991-44f6-bb6f-55cead75043f" containerName="heat-api" Feb 18 00:58:15 crc kubenswrapper[4791]: E0218 00:58:15.405404 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fd9dd05-384e-406a-b195-af283d2807ad" containerName="ceilometer-notification-agent" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.405413 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fd9dd05-384e-406a-b195-af283d2807ad" containerName="ceilometer-notification-agent" Feb 18 00:58:15 crc kubenswrapper[4791]: E0218 00:58:15.405442 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cea6666e-f991-44f6-bb6f-55cead75043f" containerName="heat-api" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.405454 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="cea6666e-f991-44f6-bb6f-55cead75043f" containerName="heat-api" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.405740 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="81e44d40-acc5-4bae-a4dc-b9dd837fffef" containerName="heat-cfnapi" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.405757 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a531661-ab4d-4689-8dd6-a1627232f871" containerName="placement-log" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.405771 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="81e44d40-acc5-4bae-a4dc-b9dd837fffef" containerName="heat-cfnapi" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.405784 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fd9dd05-384e-406a-b195-af283d2807ad" containerName="ceilometer-notification-agent" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.405799 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a531661-ab4d-4689-8dd6-a1627232f871" containerName="placement-api" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.405815 4791 
memory_manager.go:354] "RemoveStaleState removing state" podUID="6fd9dd05-384e-406a-b195-af283d2807ad" containerName="sg-core" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.405830 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="cea6666e-f991-44f6-bb6f-55cead75043f" containerName="heat-api" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.405846 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fd9dd05-384e-406a-b195-af283d2807ad" containerName="proxy-httpd" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.405860 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fd9dd05-384e-406a-b195-af283d2807ad" containerName="ceilometer-central-agent" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.405878 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="cea6666e-f991-44f6-bb6f-55cead75043f" containerName="heat-api" Feb 18 00:58:15 crc kubenswrapper[4791]: E0218 00:58:15.406146 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81e44d40-acc5-4bae-a4dc-b9dd837fffef" containerName="heat-cfnapi" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.406182 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="81e44d40-acc5-4bae-a4dc-b9dd837fffef" containerName="heat-cfnapi" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.408789 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.410540 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.413566 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.414838 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.454379 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wvm6h" podUID="90ea96e9-ad1e-49ab-b9f9-b60d2142aacd" containerName="registry-server" probeResult="failure" output=< Feb 18 00:58:15 crc kubenswrapper[4791]: timeout: failed to connect service ":50051" within 1s Feb 18 00:58:15 crc kubenswrapper[4791]: > Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.497880 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7pq2s\" (UniqueName: \"kubernetes.io/projected/0c44ac4f-9963-409d-af44-e14663f07248-kube-api-access-7pq2s\") pod \"ceilometer-0\" (UID: \"0c44ac4f-9963-409d-af44-e14663f07248\") " pod="openstack/ceilometer-0" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.497952 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c44ac4f-9963-409d-af44-e14663f07248-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0c44ac4f-9963-409d-af44-e14663f07248\") " pod="openstack/ceilometer-0" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.497996 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c44ac4f-9963-409d-af44-e14663f07248-log-httpd\") pod \"ceilometer-0\" (UID: \"0c44ac4f-9963-409d-af44-e14663f07248\") " pod="openstack/ceilometer-0" Feb 18 00:58:15 crc 
kubenswrapper[4791]: I0218 00:58:15.498316 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c44ac4f-9963-409d-af44-e14663f07248-run-httpd\") pod \"ceilometer-0\" (UID: \"0c44ac4f-9963-409d-af44-e14663f07248\") " pod="openstack/ceilometer-0" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.498423 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c44ac4f-9963-409d-af44-e14663f07248-scripts\") pod \"ceilometer-0\" (UID: \"0c44ac4f-9963-409d-af44-e14663f07248\") " pod="openstack/ceilometer-0" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.498526 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c44ac4f-9963-409d-af44-e14663f07248-config-data\") pod \"ceilometer-0\" (UID: \"0c44ac4f-9963-409d-af44-e14663f07248\") " pod="openstack/ceilometer-0" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.498691 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0c44ac4f-9963-409d-af44-e14663f07248-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0c44ac4f-9963-409d-af44-e14663f07248\") " pod="openstack/ceilometer-0" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.600705 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c44ac4f-9963-409d-af44-e14663f07248-run-httpd\") pod \"ceilometer-0\" (UID: \"0c44ac4f-9963-409d-af44-e14663f07248\") " pod="openstack/ceilometer-0" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.601023 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c44ac4f-9963-409d-af44-e14663f07248-scripts\") pod \"ceilometer-0\" (UID: \"0c44ac4f-9963-409d-af44-e14663f07248\") " pod="openstack/ceilometer-0" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.601502 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c44ac4f-9963-409d-af44-e14663f07248-config-data\") pod \"ceilometer-0\" (UID: \"0c44ac4f-9963-409d-af44-e14663f07248\") " pod="openstack/ceilometer-0" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.601661 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0c44ac4f-9963-409d-af44-e14663f07248-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0c44ac4f-9963-409d-af44-e14663f07248\") " pod="openstack/ceilometer-0" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.601260 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c44ac4f-9963-409d-af44-e14663f07248-run-httpd\") pod \"ceilometer-0\" (UID: \"0c44ac4f-9963-409d-af44-e14663f07248\") " pod="openstack/ceilometer-0" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.602012 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7pq2s\" (UniqueName: \"kubernetes.io/projected/0c44ac4f-9963-409d-af44-e14663f07248-kube-api-access-7pq2s\") pod \"ceilometer-0\" (UID: \"0c44ac4f-9963-409d-af44-e14663f07248\") " 
pod="openstack/ceilometer-0" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.602960 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c44ac4f-9963-409d-af44-e14663f07248-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0c44ac4f-9963-409d-af44-e14663f07248\") " pod="openstack/ceilometer-0" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.603684 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c44ac4f-9963-409d-af44-e14663f07248-log-httpd\") pod \"ceilometer-0\" (UID: \"0c44ac4f-9963-409d-af44-e14663f07248\") " pod="openstack/ceilometer-0" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.604992 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c44ac4f-9963-409d-af44-e14663f07248-log-httpd\") pod \"ceilometer-0\" (UID: \"0c44ac4f-9963-409d-af44-e14663f07248\") " pod="openstack/ceilometer-0" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.607826 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0c44ac4f-9963-409d-af44-e14663f07248-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0c44ac4f-9963-409d-af44-e14663f07248\") " pod="openstack/ceilometer-0" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.607939 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c44ac4f-9963-409d-af44-e14663f07248-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0c44ac4f-9963-409d-af44-e14663f07248\") " pod="openstack/ceilometer-0" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.607961 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c44ac4f-9963-409d-af44-e14663f07248-scripts\") pod \"ceilometer-0\" (UID: \"0c44ac4f-9963-409d-af44-e14663f07248\") " pod="openstack/ceilometer-0" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.609237 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c44ac4f-9963-409d-af44-e14663f07248-config-data\") pod \"ceilometer-0\" (UID: \"0c44ac4f-9963-409d-af44-e14663f07248\") " pod="openstack/ceilometer-0" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.619102 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7pq2s\" (UniqueName: \"kubernetes.io/projected/0c44ac4f-9963-409d-af44-e14663f07248-kube-api-access-7pq2s\") pod \"ceilometer-0\" (UID: \"0c44ac4f-9963-409d-af44-e14663f07248\") " pod="openstack/ceilometer-0" Feb 18 00:58:15 crc kubenswrapper[4791]: I0218 00:58:15.742919 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 18 00:58:16 crc kubenswrapper[4791]: I0218 00:58:16.063953 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-657c5fbcdb-dphwk" event={"ID":"cea6666e-f991-44f6-bb6f-55cead75043f","Type":"ContainerDied","Data":"50cacdccab64a591e41c700fd5268d99d5bd48d1982c41e15f529b00d00d21f2"} Feb 18 00:58:16 crc kubenswrapper[4791]: I0218 00:58:16.064005 4791 scope.go:117] "RemoveContainer" containerID="9818653ce514504e9b398b2998a0471c5002812e7e0f6d7c5ea536ea7eef7e33" Feb 18 00:58:16 crc kubenswrapper[4791]: I0218 00:58:16.064092 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-657c5fbcdb-dphwk" Feb 18 00:58:16 crc kubenswrapper[4791]: I0218 00:58:16.096566 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7f4b9bdf54-r459n" event={"ID":"81e44d40-acc5-4bae-a4dc-b9dd837fffef","Type":"ContainerDied","Data":"0b2a88d3de4af50d90002f20a75565e63d056349935ba190c0efc3ade820183b"} Feb 18 00:58:16 crc kubenswrapper[4791]: I0218 00:58:16.096924 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7f4b9bdf54-r459n" Feb 18 00:58:16 crc kubenswrapper[4791]: I0218 00:58:16.126143 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-657c5fbcdb-dphwk"] Feb 18 00:58:16 crc kubenswrapper[4791]: I0218 00:58:16.137317 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-657c5fbcdb-dphwk"] Feb 18 00:58:16 crc kubenswrapper[4791]: I0218 00:58:16.137393 4791 scope.go:117] "RemoveContainer" containerID="c6d5189293d82e7a532d052b5bc004f1e15f18dcdd6071502970727ea050d8bd" Feb 18 00:58:16 crc kubenswrapper[4791]: I0218 00:58:16.185209 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-7f4b9bdf54-r459n"] Feb 18 00:58:16 crc kubenswrapper[4791]: I0218 00:58:16.198113 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-7f4b9bdf54-r459n"] Feb 18 00:58:16 crc kubenswrapper[4791]: I0218 00:58:16.326130 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:58:16 crc kubenswrapper[4791]: I0218 00:58:16.762381 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:58:17 crc kubenswrapper[4791]: I0218 00:58:17.074555 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6fd9dd05-384e-406a-b195-af283d2807ad" path="/var/lib/kubelet/pods/6fd9dd05-384e-406a-b195-af283d2807ad/volumes" Feb 18 00:58:17 crc kubenswrapper[4791]: I0218 00:58:17.075747 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="81e44d40-acc5-4bae-a4dc-b9dd837fffef" path="/var/lib/kubelet/pods/81e44d40-acc5-4bae-a4dc-b9dd837fffef/volumes" Feb 18 00:58:17 crc kubenswrapper[4791]: I0218 00:58:17.076345 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cea6666e-f991-44f6-bb6f-55cead75043f" path="/var/lib/kubelet/pods/cea6666e-f991-44f6-bb6f-55cead75043f/volumes" Feb 18 00:58:17 crc kubenswrapper[4791]: I0218 00:58:17.116934 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0c44ac4f-9963-409d-af44-e14663f07248","Type":"ContainerStarted","Data":"a7f84fcf40c1d2ffa1f8d6b4c39da978b7c146cecdbc5ae26ad4496d80299f1d"} Feb 18 00:58:17 crc kubenswrapper[4791]: I0218 00:58:17.116976 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"0c44ac4f-9963-409d-af44-e14663f07248","Type":"ContainerStarted","Data":"1a782df827e353e9e0c83ad91420d6d30bd2049f41ff44aa27d0b7dbfaf67e22"} Feb 18 00:58:19 crc kubenswrapper[4791]: I0218 00:58:19.663183 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-b4df76d98-82cxf" Feb 18 00:58:19 crc kubenswrapper[4791]: I0218 00:58:19.727538 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-5686f7fc6f-xd9xk"] Feb 18 00:58:19 crc kubenswrapper[4791]: I0218 00:58:19.727775 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-engine-5686f7fc6f-xd9xk" podUID="f729e438-68f7-48b7-9a93-e54e1da4c045" containerName="heat-engine" containerID="cri-o://f4c1fcdacff64d1d556094357b41cb186af1fc2cb5b33cc0e2ed8cf86303b2c4" gracePeriod=60 Feb 18 00:58:20 crc kubenswrapper[4791]: E0218 00:58:20.935269 4791 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f4c1fcdacff64d1d556094357b41cb186af1fc2cb5b33cc0e2ed8cf86303b2c4" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Feb 18 00:58:20 crc kubenswrapper[4791]: E0218 00:58:20.944242 4791 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f4c1fcdacff64d1d556094357b41cb186af1fc2cb5b33cc0e2ed8cf86303b2c4" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Feb 18 00:58:20 crc kubenswrapper[4791]: E0218 00:58:20.946752 4791 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f4c1fcdacff64d1d556094357b41cb186af1fc2cb5b33cc0e2ed8cf86303b2c4" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Feb 18 00:58:20 crc kubenswrapper[4791]: E0218 00:58:20.946790 4791 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-5686f7fc6f-xd9xk" podUID="f729e438-68f7-48b7-9a93-e54e1da4c045" containerName="heat-engine" Feb 18 00:58:23 crc kubenswrapper[4791]: I0218 00:58:23.207394 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-6jpnb" event={"ID":"f1d1a31c-1309-4888-8122-16d6be151b19","Type":"ContainerStarted","Data":"047f05aa57fed5d29f63ce2579a8d0fdef33b00590b0538db1f3a0405b7252eb"} Feb 18 00:58:23 crc kubenswrapper[4791]: I0218 00:58:23.216601 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0c44ac4f-9963-409d-af44-e14663f07248","Type":"ContainerStarted","Data":"ec922737c482457d096c4845ebc23d17c6cd0d97742d1024c8163b630647b80b"} Feb 18 00:58:23 crc kubenswrapper[4791]: I0218 00:58:23.230831 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-6jpnb" podStartSLOduration=1.443938624 podStartE2EDuration="12.230815043s" podCreationTimestamp="2026-02-18 00:58:11 +0000 UTC" firstStartedPulling="2026-02-18 00:58:11.963837256 +0000 UTC m=+1433.531850426" lastFinishedPulling="2026-02-18 00:58:22.750713675 +0000 UTC m=+1444.318726845" observedRunningTime="2026-02-18 00:58:23.229558385 +0000 UTC 
m=+1444.797571555" watchObservedRunningTime="2026-02-18 00:58:23.230815043 +0000 UTC m=+1444.798828213" Feb 18 00:58:24 crc kubenswrapper[4791]: I0218 00:58:24.227488 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0c44ac4f-9963-409d-af44-e14663f07248","Type":"ContainerStarted","Data":"69705292e99c96a1f7dad4e21e2b839371a398c1f11b86b523957b6f15bdad1f"} Feb 18 00:58:25 crc kubenswrapper[4791]: I0218 00:58:25.443208 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wvm6h" podUID="90ea96e9-ad1e-49ab-b9f9-b60d2142aacd" containerName="registry-server" probeResult="failure" output=< Feb 18 00:58:25 crc kubenswrapper[4791]: timeout: failed to connect service ":50051" within 1s Feb 18 00:58:25 crc kubenswrapper[4791]: > Feb 18 00:58:26 crc kubenswrapper[4791]: I0218 00:58:26.253716 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0c44ac4f-9963-409d-af44-e14663f07248","Type":"ContainerStarted","Data":"ac0e7a3080862effad474c40e95a255b3ee4285698a43ca9e5e661d5bebcf6f0"} Feb 18 00:58:26 crc kubenswrapper[4791]: I0218 00:58:26.253901 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0c44ac4f-9963-409d-af44-e14663f07248" containerName="ceilometer-central-agent" containerID="cri-o://a7f84fcf40c1d2ffa1f8d6b4c39da978b7c146cecdbc5ae26ad4496d80299f1d" gracePeriod=30 Feb 18 00:58:26 crc kubenswrapper[4791]: I0218 00:58:26.254035 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0c44ac4f-9963-409d-af44-e14663f07248" containerName="proxy-httpd" containerID="cri-o://ac0e7a3080862effad474c40e95a255b3ee4285698a43ca9e5e661d5bebcf6f0" gracePeriod=30 Feb 18 00:58:26 crc kubenswrapper[4791]: I0218 00:58:26.254081 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0c44ac4f-9963-409d-af44-e14663f07248" containerName="sg-core" containerID="cri-o://69705292e99c96a1f7dad4e21e2b839371a398c1f11b86b523957b6f15bdad1f" gracePeriod=30 Feb 18 00:58:26 crc kubenswrapper[4791]: I0218 00:58:26.254135 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0c44ac4f-9963-409d-af44-e14663f07248" containerName="ceilometer-notification-agent" containerID="cri-o://ec922737c482457d096c4845ebc23d17c6cd0d97742d1024c8163b630647b80b" gracePeriod=30 Feb 18 00:58:26 crc kubenswrapper[4791]: I0218 00:58:26.254355 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Feb 18 00:58:26 crc kubenswrapper[4791]: I0218 00:58:26.290256 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.484948909 podStartE2EDuration="11.290202598s" podCreationTimestamp="2026-02-18 00:58:15 +0000 UTC" firstStartedPulling="2026-02-18 00:58:16.330458651 +0000 UTC m=+1437.898471821" lastFinishedPulling="2026-02-18 00:58:25.13571234 +0000 UTC m=+1446.703725510" observedRunningTime="2026-02-18 00:58:26.273888933 +0000 UTC m=+1447.841902113" watchObservedRunningTime="2026-02-18 00:58:26.290202598 +0000 UTC m=+1447.858215768" Feb 18 00:58:26 crc kubenswrapper[4791]: I0218 00:58:26.800496 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 00:58:26 crc kubenswrapper[4791]: I0218 00:58:26.800788 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 00:58:27 crc kubenswrapper[4791]: I0218 00:58:27.265761 4791 generic.go:334] "Generic (PLEG): container finished" podID="0c44ac4f-9963-409d-af44-e14663f07248" containerID="ac0e7a3080862effad474c40e95a255b3ee4285698a43ca9e5e661d5bebcf6f0" exitCode=0 Feb 18 00:58:27 crc kubenswrapper[4791]: I0218 00:58:27.265795 4791 generic.go:334] "Generic (PLEG): container finished" podID="0c44ac4f-9963-409d-af44-e14663f07248" containerID="69705292e99c96a1f7dad4e21e2b839371a398c1f11b86b523957b6f15bdad1f" exitCode=2 Feb 18 00:58:27 crc kubenswrapper[4791]: I0218 00:58:27.265802 4791 generic.go:334] "Generic (PLEG): container finished" podID="0c44ac4f-9963-409d-af44-e14663f07248" containerID="ec922737c482457d096c4845ebc23d17c6cd0d97742d1024c8163b630647b80b" exitCode=0 Feb 18 00:58:27 crc kubenswrapper[4791]: I0218 00:58:27.265826 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0c44ac4f-9963-409d-af44-e14663f07248","Type":"ContainerDied","Data":"ac0e7a3080862effad474c40e95a255b3ee4285698a43ca9e5e661d5bebcf6f0"} Feb 18 00:58:27 crc kubenswrapper[4791]: I0218 00:58:27.265865 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0c44ac4f-9963-409d-af44-e14663f07248","Type":"ContainerDied","Data":"69705292e99c96a1f7dad4e21e2b839371a398c1f11b86b523957b6f15bdad1f"} Feb 18 00:58:27 crc kubenswrapper[4791]: I0218 00:58:27.265880 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0c44ac4f-9963-409d-af44-e14663f07248","Type":"ContainerDied","Data":"ec922737c482457d096c4845ebc23d17c6cd0d97742d1024c8163b630647b80b"} Feb 18 00:58:27 crc kubenswrapper[4791]: I0218 00:58:27.879958 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 18 00:58:27 crc kubenswrapper[4791]: I0218 00:58:27.880195 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="718be5cf-bdd1-4639-9fc5-343a770f1244" containerName="glance-log" containerID="cri-o://8a5fe3982a7495bb52009a917224a3e8e43967c5bfa0a135339fe3e84e58258b" gracePeriod=30 Feb 18 00:58:27 crc kubenswrapper[4791]: I0218 00:58:27.880673 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="718be5cf-bdd1-4639-9fc5-343a770f1244" containerName="glance-httpd" containerID="cri-o://38a6fa60495cbae75896050e9525f0c658396c2a526071571bf237bcb5629cfa" gracePeriod=30 Feb 18 00:58:28 crc kubenswrapper[4791]: E0218 00:58:28.191272 4791 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod718be5cf_bdd1_4639_9fc5_343a770f1244.slice/crio-8a5fe3982a7495bb52009a917224a3e8e43967c5bfa0a135339fe3e84e58258b.scope\": RecentStats: unable to find data in memory cache], 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod718be5cf_bdd1_4639_9fc5_343a770f1244.slice/crio-conmon-8a5fe3982a7495bb52009a917224a3e8e43967c5bfa0a135339fe3e84e58258b.scope\": RecentStats: unable to find data in memory cache]" Feb 18 00:58:28 crc kubenswrapper[4791]: I0218 00:58:28.284494 4791 generic.go:334] "Generic (PLEG): container finished" podID="718be5cf-bdd1-4639-9fc5-343a770f1244" containerID="8a5fe3982a7495bb52009a917224a3e8e43967c5bfa0a135339fe3e84e58258b" exitCode=143 Feb 18 00:58:28 crc kubenswrapper[4791]: I0218 00:58:28.284548 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"718be5cf-bdd1-4639-9fc5-343a770f1244","Type":"ContainerDied","Data":"8a5fe3982a7495bb52009a917224a3e8e43967c5bfa0a135339fe3e84e58258b"} Feb 18 00:58:29 crc kubenswrapper[4791]: I0218 00:58:29.916147 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 18 00:58:29 crc kubenswrapper[4791]: I0218 00:58:29.916879 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="88f5c47b-706b-4d5b-9822-56c13e90a7a9" containerName="glance-log" containerID="cri-o://582a216500d9a6b639bae64411291d499bbc933af54e6099e2a8928ff963d396" gracePeriod=30 Feb 18 00:58:29 crc kubenswrapper[4791]: I0218 00:58:29.916914 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="88f5c47b-706b-4d5b-9822-56c13e90a7a9" containerName="glance-httpd" containerID="cri-o://88ea5f88ed7bfed3311bc6d29fa7c4418d1dd0ebeb5bc50dfe90c18b8aa803a7" gracePeriod=30 Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.310253 4791 generic.go:334] "Generic (PLEG): container finished" podID="0c44ac4f-9963-409d-af44-e14663f07248" containerID="a7f84fcf40c1d2ffa1f8d6b4c39da978b7c146cecdbc5ae26ad4496d80299f1d" exitCode=0 Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.310609 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0c44ac4f-9963-409d-af44-e14663f07248","Type":"ContainerDied","Data":"a7f84fcf40c1d2ffa1f8d6b4c39da978b7c146cecdbc5ae26ad4496d80299f1d"} Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.315029 4791 generic.go:334] "Generic (PLEG): container finished" podID="f729e438-68f7-48b7-9a93-e54e1da4c045" containerID="f4c1fcdacff64d1d556094357b41cb186af1fc2cb5b33cc0e2ed8cf86303b2c4" exitCode=0 Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.315111 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-5686f7fc6f-xd9xk" event={"ID":"f729e438-68f7-48b7-9a93-e54e1da4c045","Type":"ContainerDied","Data":"f4c1fcdacff64d1d556094357b41cb186af1fc2cb5b33cc0e2ed8cf86303b2c4"} Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.315177 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-5686f7fc6f-xd9xk" event={"ID":"f729e438-68f7-48b7-9a93-e54e1da4c045","Type":"ContainerDied","Data":"fb46bb1fe01722d5f081fa38d2e8a68b858b5b1e84773d32503419c1acc29ca5"} Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.315194 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fb46bb1fe01722d5f081fa38d2e8a68b858b5b1e84773d32503419c1acc29ca5" Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.317683 4791 generic.go:334] "Generic (PLEG): container finished" podID="88f5c47b-706b-4d5b-9822-56c13e90a7a9" 
containerID="582a216500d9a6b639bae64411291d499bbc933af54e6099e2a8928ff963d396" exitCode=143 Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.317714 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"88f5c47b-706b-4d5b-9822-56c13e90a7a9","Type":"ContainerDied","Data":"582a216500d9a6b639bae64411291d499bbc933af54e6099e2a8928ff963d396"} Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.330473 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-5686f7fc6f-xd9xk" Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.422358 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f729e438-68f7-48b7-9a93-e54e1da4c045-combined-ca-bundle\") pod \"f729e438-68f7-48b7-9a93-e54e1da4c045\" (UID: \"f729e438-68f7-48b7-9a93-e54e1da4c045\") " Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.422443 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vq7dn\" (UniqueName: \"kubernetes.io/projected/f729e438-68f7-48b7-9a93-e54e1da4c045-kube-api-access-vq7dn\") pod \"f729e438-68f7-48b7-9a93-e54e1da4c045\" (UID: \"f729e438-68f7-48b7-9a93-e54e1da4c045\") " Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.422473 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f729e438-68f7-48b7-9a93-e54e1da4c045-config-data-custom\") pod \"f729e438-68f7-48b7-9a93-e54e1da4c045\" (UID: \"f729e438-68f7-48b7-9a93-e54e1da4c045\") " Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.422614 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f729e438-68f7-48b7-9a93-e54e1da4c045-config-data\") pod \"f729e438-68f7-48b7-9a93-e54e1da4c045\" (UID: \"f729e438-68f7-48b7-9a93-e54e1da4c045\") " Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.429024 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f729e438-68f7-48b7-9a93-e54e1da4c045-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "f729e438-68f7-48b7-9a93-e54e1da4c045" (UID: "f729e438-68f7-48b7-9a93-e54e1da4c045"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.429713 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f729e438-68f7-48b7-9a93-e54e1da4c045-kube-api-access-vq7dn" (OuterVolumeSpecName: "kube-api-access-vq7dn") pod "f729e438-68f7-48b7-9a93-e54e1da4c045" (UID: "f729e438-68f7-48b7-9a93-e54e1da4c045"). InnerVolumeSpecName "kube-api-access-vq7dn". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.462692 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f729e438-68f7-48b7-9a93-e54e1da4c045-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f729e438-68f7-48b7-9a93-e54e1da4c045" (UID: "f729e438-68f7-48b7-9a93-e54e1da4c045"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.489408 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f729e438-68f7-48b7-9a93-e54e1da4c045-config-data" (OuterVolumeSpecName: "config-data") pod "f729e438-68f7-48b7-9a93-e54e1da4c045" (UID: "f729e438-68f7-48b7-9a93-e54e1da4c045"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.502371 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.525502 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f729e438-68f7-48b7-9a93-e54e1da4c045-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.525533 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vq7dn\" (UniqueName: \"kubernetes.io/projected/f729e438-68f7-48b7-9a93-e54e1da4c045-kube-api-access-vq7dn\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.525545 4791 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f729e438-68f7-48b7-9a93-e54e1da4c045-config-data-custom\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.525555 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f729e438-68f7-48b7-9a93-e54e1da4c045-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.627379 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c44ac4f-9963-409d-af44-e14663f07248-run-httpd\") pod \"0c44ac4f-9963-409d-af44-e14663f07248\" (UID: \"0c44ac4f-9963-409d-af44-e14663f07248\") " Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.627446 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c44ac4f-9963-409d-af44-e14663f07248-scripts\") pod \"0c44ac4f-9963-409d-af44-e14663f07248\" (UID: \"0c44ac4f-9963-409d-af44-e14663f07248\") " Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.627537 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c44ac4f-9963-409d-af44-e14663f07248-config-data\") pod \"0c44ac4f-9963-409d-af44-e14663f07248\" (UID: \"0c44ac4f-9963-409d-af44-e14663f07248\") " Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.627569 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0c44ac4f-9963-409d-af44-e14663f07248-sg-core-conf-yaml\") pod \"0c44ac4f-9963-409d-af44-e14663f07248\" (UID: \"0c44ac4f-9963-409d-af44-e14663f07248\") " Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.627610 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c44ac4f-9963-409d-af44-e14663f07248-combined-ca-bundle\") pod \"0c44ac4f-9963-409d-af44-e14663f07248\" (UID: \"0c44ac4f-9963-409d-af44-e14663f07248\") " Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.627646 4791 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7pq2s\" (UniqueName: \"kubernetes.io/projected/0c44ac4f-9963-409d-af44-e14663f07248-kube-api-access-7pq2s\") pod \"0c44ac4f-9963-409d-af44-e14663f07248\" (UID: \"0c44ac4f-9963-409d-af44-e14663f07248\") " Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.627679 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c44ac4f-9963-409d-af44-e14663f07248-log-httpd\") pod \"0c44ac4f-9963-409d-af44-e14663f07248\" (UID: \"0c44ac4f-9963-409d-af44-e14663f07248\") " Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.627761 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0c44ac4f-9963-409d-af44-e14663f07248-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "0c44ac4f-9963-409d-af44-e14663f07248" (UID: "0c44ac4f-9963-409d-af44-e14663f07248"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.628199 4791 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c44ac4f-9963-409d-af44-e14663f07248-run-httpd\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.628414 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0c44ac4f-9963-409d-af44-e14663f07248-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "0c44ac4f-9963-409d-af44-e14663f07248" (UID: "0c44ac4f-9963-409d-af44-e14663f07248"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.631567 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c44ac4f-9963-409d-af44-e14663f07248-scripts" (OuterVolumeSpecName: "scripts") pod "0c44ac4f-9963-409d-af44-e14663f07248" (UID: "0c44ac4f-9963-409d-af44-e14663f07248"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.632654 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c44ac4f-9963-409d-af44-e14663f07248-kube-api-access-7pq2s" (OuterVolumeSpecName: "kube-api-access-7pq2s") pod "0c44ac4f-9963-409d-af44-e14663f07248" (UID: "0c44ac4f-9963-409d-af44-e14663f07248"). InnerVolumeSpecName "kube-api-access-7pq2s". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.661823 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c44ac4f-9963-409d-af44-e14663f07248-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "0c44ac4f-9963-409d-af44-e14663f07248" (UID: "0c44ac4f-9963-409d-af44-e14663f07248"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.715915 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c44ac4f-9963-409d-af44-e14663f07248-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0c44ac4f-9963-409d-af44-e14663f07248" (UID: "0c44ac4f-9963-409d-af44-e14663f07248"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.730026 4791 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c44ac4f-9963-409d-af44-e14663f07248-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.730057 4791 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0c44ac4f-9963-409d-af44-e14663f07248-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.730069 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c44ac4f-9963-409d-af44-e14663f07248-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.730078 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7pq2s\" (UniqueName: \"kubernetes.io/projected/0c44ac4f-9963-409d-af44-e14663f07248-kube-api-access-7pq2s\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.730088 4791 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c44ac4f-9963-409d-af44-e14663f07248-log-httpd\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.744537 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c44ac4f-9963-409d-af44-e14663f07248-config-data" (OuterVolumeSpecName: "config-data") pod "0c44ac4f-9963-409d-af44-e14663f07248" (UID: "0c44ac4f-9963-409d-af44-e14663f07248"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:30 crc kubenswrapper[4791]: I0218 00:58:30.832391 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c44ac4f-9963-409d-af44-e14663f07248-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.331093 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.331124 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0c44ac4f-9963-409d-af44-e14663f07248","Type":"ContainerDied","Data":"1a782df827e353e9e0c83ad91420d6d30bd2049f41ff44aa27d0b7dbfaf67e22"} Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.331186 4791 scope.go:117] "RemoveContainer" containerID="ac0e7a3080862effad474c40e95a255b3ee4285698a43ca9e5e661d5bebcf6f0" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.331101 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-5686f7fc6f-xd9xk" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.358359 4791 scope.go:117] "RemoveContainer" containerID="69705292e99c96a1f7dad4e21e2b839371a398c1f11b86b523957b6f15bdad1f" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.368410 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.379838 4791 scope.go:117] "RemoveContainer" containerID="ec922737c482457d096c4845ebc23d17c6cd0d97742d1024c8163b630647b80b" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.385352 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.402292 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-5686f7fc6f-xd9xk"] Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.419218 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-engine-5686f7fc6f-xd9xk"] Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.428824 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:58:31 crc kubenswrapper[4791]: E0218 00:58:31.429312 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c44ac4f-9963-409d-af44-e14663f07248" containerName="proxy-httpd" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.429336 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c44ac4f-9963-409d-af44-e14663f07248" containerName="proxy-httpd" Feb 18 00:58:31 crc kubenswrapper[4791]: E0218 00:58:31.429349 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c44ac4f-9963-409d-af44-e14663f07248" containerName="sg-core" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.429360 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c44ac4f-9963-409d-af44-e14663f07248" containerName="sg-core" Feb 18 00:58:31 crc kubenswrapper[4791]: E0218 00:58:31.429377 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c44ac4f-9963-409d-af44-e14663f07248" containerName="ceilometer-notification-agent" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.429384 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c44ac4f-9963-409d-af44-e14663f07248" containerName="ceilometer-notification-agent" Feb 18 00:58:31 crc kubenswrapper[4791]: E0218 00:58:31.429415 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f729e438-68f7-48b7-9a93-e54e1da4c045" containerName="heat-engine" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.429421 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="f729e438-68f7-48b7-9a93-e54e1da4c045" containerName="heat-engine" Feb 18 00:58:31 crc kubenswrapper[4791]: E0218 00:58:31.429447 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c44ac4f-9963-409d-af44-e14663f07248" containerName="ceilometer-central-agent" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.429458 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c44ac4f-9963-409d-af44-e14663f07248" containerName="ceilometer-central-agent" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.429684 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c44ac4f-9963-409d-af44-e14663f07248" containerName="sg-core" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.429708 4791 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="0c44ac4f-9963-409d-af44-e14663f07248" containerName="ceilometer-central-agent" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.429720 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="f729e438-68f7-48b7-9a93-e54e1da4c045" containerName="heat-engine" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.429742 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c44ac4f-9963-409d-af44-e14663f07248" containerName="proxy-httpd" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.429759 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c44ac4f-9963-409d-af44-e14663f07248" containerName="ceilometer-notification-agent" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.431984 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.459934 4791 scope.go:117] "RemoveContainer" containerID="a7f84fcf40c1d2ffa1f8d6b4c39da978b7c146cecdbc5ae26ad4496d80299f1d" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.463029 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.465376 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.472826 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.563343 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-run-httpd\") pod \"ceilometer-0\" (UID: \"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5\") " pod="openstack/ceilometer-0" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.563430 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-log-httpd\") pod \"ceilometer-0\" (UID: \"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5\") " pod="openstack/ceilometer-0" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.563457 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-79k75\" (UniqueName: \"kubernetes.io/projected/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-kube-api-access-79k75\") pod \"ceilometer-0\" (UID: \"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5\") " pod="openstack/ceilometer-0" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.563512 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-scripts\") pod \"ceilometer-0\" (UID: \"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5\") " pod="openstack/ceilometer-0" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.563532 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5\") " pod="openstack/ceilometer-0" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.563575 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-config-data\") pod \"ceilometer-0\" (UID: \"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5\") " pod="openstack/ceilometer-0" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.563590 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5\") " pod="openstack/ceilometer-0" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.665830 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-log-httpd\") pod \"ceilometer-0\" (UID: \"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5\") " pod="openstack/ceilometer-0" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.666107 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-79k75\" (UniqueName: \"kubernetes.io/projected/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-kube-api-access-79k75\") pod \"ceilometer-0\" (UID: \"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5\") " pod="openstack/ceilometer-0" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.666178 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-scripts\") pod \"ceilometer-0\" (UID: \"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5\") " pod="openstack/ceilometer-0" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.666202 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5\") " pod="openstack/ceilometer-0" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.666253 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-config-data\") pod \"ceilometer-0\" (UID: \"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5\") " pod="openstack/ceilometer-0" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.666268 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5\") " pod="openstack/ceilometer-0" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.666369 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-run-httpd\") pod \"ceilometer-0\" (UID: \"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5\") " pod="openstack/ceilometer-0" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.666433 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-log-httpd\") pod \"ceilometer-0\" (UID: \"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5\") " pod="openstack/ceilometer-0" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.666723 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" 
(UniqueName: \"kubernetes.io/empty-dir/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-run-httpd\") pod \"ceilometer-0\" (UID: \"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5\") " pod="openstack/ceilometer-0" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.671265 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5\") " pod="openstack/ceilometer-0" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.673928 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-scripts\") pod \"ceilometer-0\" (UID: \"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5\") " pod="openstack/ceilometer-0" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.685096 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5\") " pod="openstack/ceilometer-0" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.688765 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-79k75\" (UniqueName: \"kubernetes.io/projected/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-kube-api-access-79k75\") pod \"ceilometer-0\" (UID: \"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5\") " pod="openstack/ceilometer-0" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.689208 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-config-data\") pod \"ceilometer-0\" (UID: \"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5\") " pod="openstack/ceilometer-0" Feb 18 00:58:31 crc kubenswrapper[4791]: I0218 00:58:31.880353 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.022233 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.182868 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/718be5cf-bdd1-4639-9fc5-343a770f1244-scripts\") pod \"718be5cf-bdd1-4639-9fc5-343a770f1244\" (UID: \"718be5cf-bdd1-4639-9fc5-343a770f1244\") " Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.182957 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/718be5cf-bdd1-4639-9fc5-343a770f1244-public-tls-certs\") pod \"718be5cf-bdd1-4639-9fc5-343a770f1244\" (UID: \"718be5cf-bdd1-4639-9fc5-343a770f1244\") " Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.183030 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/718be5cf-bdd1-4639-9fc5-343a770f1244-logs\") pod \"718be5cf-bdd1-4639-9fc5-343a770f1244\" (UID: \"718be5cf-bdd1-4639-9fc5-343a770f1244\") " Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.183112 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718be5cf-bdd1-4639-9fc5-343a770f1244-combined-ca-bundle\") pod \"718be5cf-bdd1-4639-9fc5-343a770f1244\" (UID: \"718be5cf-bdd1-4639-9fc5-343a770f1244\") " Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.183562 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/718be5cf-bdd1-4639-9fc5-343a770f1244-logs" (OuterVolumeSpecName: "logs") pod "718be5cf-bdd1-4639-9fc5-343a770f1244" (UID: "718be5cf-bdd1-4639-9fc5-343a770f1244"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.183618 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4472ce99-9885-40a2-bc85-66819bd1580e\") pod \"718be5cf-bdd1-4639-9fc5-343a770f1244\" (UID: \"718be5cf-bdd1-4639-9fc5-343a770f1244\") " Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.183743 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/718be5cf-bdd1-4639-9fc5-343a770f1244-httpd-run\") pod \"718be5cf-bdd1-4639-9fc5-343a770f1244\" (UID: \"718be5cf-bdd1-4639-9fc5-343a770f1244\") " Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.183793 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cvh9c\" (UniqueName: \"kubernetes.io/projected/718be5cf-bdd1-4639-9fc5-343a770f1244-kube-api-access-cvh9c\") pod \"718be5cf-bdd1-4639-9fc5-343a770f1244\" (UID: \"718be5cf-bdd1-4639-9fc5-343a770f1244\") " Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.183892 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/718be5cf-bdd1-4639-9fc5-343a770f1244-config-data\") pod \"718be5cf-bdd1-4639-9fc5-343a770f1244\" (UID: \"718be5cf-bdd1-4639-9fc5-343a770f1244\") " Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.184701 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/718be5cf-bdd1-4639-9fc5-343a770f1244-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "718be5cf-bdd1-4639-9fc5-343a770f1244" (UID: "718be5cf-bdd1-4639-9fc5-343a770f1244"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.184733 4791 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/718be5cf-bdd1-4639-9fc5-343a770f1244-logs\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.196923 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/718be5cf-bdd1-4639-9fc5-343a770f1244-kube-api-access-cvh9c" (OuterVolumeSpecName: "kube-api-access-cvh9c") pod "718be5cf-bdd1-4639-9fc5-343a770f1244" (UID: "718be5cf-bdd1-4639-9fc5-343a770f1244"). InnerVolumeSpecName "kube-api-access-cvh9c". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.206359 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/718be5cf-bdd1-4639-9fc5-343a770f1244-scripts" (OuterVolumeSpecName: "scripts") pod "718be5cf-bdd1-4639-9fc5-343a770f1244" (UID: "718be5cf-bdd1-4639-9fc5-343a770f1244"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.223239 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/718be5cf-bdd1-4639-9fc5-343a770f1244-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "718be5cf-bdd1-4639-9fc5-343a770f1244" (UID: "718be5cf-bdd1-4639-9fc5-343a770f1244"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.239883 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4472ce99-9885-40a2-bc85-66819bd1580e" (OuterVolumeSpecName: "glance") pod "718be5cf-bdd1-4639-9fc5-343a770f1244" (UID: "718be5cf-bdd1-4639-9fc5-343a770f1244"). InnerVolumeSpecName "pvc-4472ce99-9885-40a2-bc85-66819bd1580e". PluginName "kubernetes.io/csi", VolumeGidValue "" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.259111 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/718be5cf-bdd1-4639-9fc5-343a770f1244-config-data" (OuterVolumeSpecName: "config-data") pod "718be5cf-bdd1-4639-9fc5-343a770f1244" (UID: "718be5cf-bdd1-4639-9fc5-343a770f1244"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.261825 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/718be5cf-bdd1-4639-9fc5-343a770f1244-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "718be5cf-bdd1-4639-9fc5-343a770f1244" (UID: "718be5cf-bdd1-4639-9fc5-343a770f1244"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.287429 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/718be5cf-bdd1-4639-9fc5-343a770f1244-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.287470 4791 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/718be5cf-bdd1-4639-9fc5-343a770f1244-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.287485 4791 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/718be5cf-bdd1-4639-9fc5-343a770f1244-public-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.287498 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718be5cf-bdd1-4639-9fc5-343a770f1244-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.287541 4791 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-4472ce99-9885-40a2-bc85-66819bd1580e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4472ce99-9885-40a2-bc85-66819bd1580e\") on node \"crc\" " Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.287556 4791 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/718be5cf-bdd1-4639-9fc5-343a770f1244-httpd-run\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.287567 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cvh9c\" (UniqueName: \"kubernetes.io/projected/718be5cf-bdd1-4639-9fc5-343a770f1244-kube-api-access-cvh9c\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.322053 4791 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.322225 4791 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-4472ce99-9885-40a2-bc85-66819bd1580e" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4472ce99-9885-40a2-bc85-66819bd1580e") on node "crc" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.344787 4791 generic.go:334] "Generic (PLEG): container finished" podID="718be5cf-bdd1-4639-9fc5-343a770f1244" containerID="38a6fa60495cbae75896050e9525f0c658396c2a526071571bf237bcb5629cfa" exitCode=0 Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.344823 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"718be5cf-bdd1-4639-9fc5-343a770f1244","Type":"ContainerDied","Data":"38a6fa60495cbae75896050e9525f0c658396c2a526071571bf237bcb5629cfa"} Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.344844 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"718be5cf-bdd1-4639-9fc5-343a770f1244","Type":"ContainerDied","Data":"13804206f29b8a896baf9d18d433f189c264418eb140b0f9390c85c4208f93cf"} Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.344860 4791 scope.go:117] "RemoveContainer" containerID="38a6fa60495cbae75896050e9525f0c658396c2a526071571bf237bcb5629cfa" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.344877 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.388010 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.389531 4791 reconciler_common.go:293] "Volume detached for volume \"pvc-4472ce99-9885-40a2-bc85-66819bd1580e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4472ce99-9885-40a2-bc85-66819bd1580e\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.408486 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.408762 4791 scope.go:117] "RemoveContainer" containerID="8a5fe3982a7495bb52009a917224a3e8e43967c5bfa0a135339fe3e84e58258b" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.433699 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.456290 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Feb 18 00:58:32 crc kubenswrapper[4791]: E0218 00:58:32.457538 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="718be5cf-bdd1-4639-9fc5-343a770f1244" containerName="glance-log" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.457562 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="718be5cf-bdd1-4639-9fc5-343a770f1244" containerName="glance-log" Feb 18 00:58:32 crc kubenswrapper[4791]: E0218 00:58:32.457576 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="718be5cf-bdd1-4639-9fc5-343a770f1244" containerName="glance-httpd" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.457582 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="718be5cf-bdd1-4639-9fc5-343a770f1244" containerName="glance-httpd" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.457878 4791 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="718be5cf-bdd1-4639-9fc5-343a770f1244" containerName="glance-httpd" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.457897 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="718be5cf-bdd1-4639-9fc5-343a770f1244" containerName="glance-log" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.458276 4791 scope.go:117] "RemoveContainer" containerID="38a6fa60495cbae75896050e9525f0c658396c2a526071571bf237bcb5629cfa" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.460168 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 18 00:58:32 crc kubenswrapper[4791]: E0218 00:58:32.460828 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38a6fa60495cbae75896050e9525f0c658396c2a526071571bf237bcb5629cfa\": container with ID starting with 38a6fa60495cbae75896050e9525f0c658396c2a526071571bf237bcb5629cfa not found: ID does not exist" containerID="38a6fa60495cbae75896050e9525f0c658396c2a526071571bf237bcb5629cfa" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.460870 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38a6fa60495cbae75896050e9525f0c658396c2a526071571bf237bcb5629cfa"} err="failed to get container status \"38a6fa60495cbae75896050e9525f0c658396c2a526071571bf237bcb5629cfa\": rpc error: code = NotFound desc = could not find container \"38a6fa60495cbae75896050e9525f0c658396c2a526071571bf237bcb5629cfa\": container with ID starting with 38a6fa60495cbae75896050e9525f0c658396c2a526071571bf237bcb5629cfa not found: ID does not exist" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.460896 4791 scope.go:117] "RemoveContainer" containerID="8a5fe3982a7495bb52009a917224a3e8e43967c5bfa0a135339fe3e84e58258b" Feb 18 00:58:32 crc kubenswrapper[4791]: E0218 00:58:32.461406 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8a5fe3982a7495bb52009a917224a3e8e43967c5bfa0a135339fe3e84e58258b\": container with ID starting with 8a5fe3982a7495bb52009a917224a3e8e43967c5bfa0a135339fe3e84e58258b not found: ID does not exist" containerID="8a5fe3982a7495bb52009a917224a3e8e43967c5bfa0a135339fe3e84e58258b" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.461432 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8a5fe3982a7495bb52009a917224a3e8e43967c5bfa0a135339fe3e84e58258b"} err="failed to get container status \"8a5fe3982a7495bb52009a917224a3e8e43967c5bfa0a135339fe3e84e58258b\": rpc error: code = NotFound desc = could not find container \"8a5fe3982a7495bb52009a917224a3e8e43967c5bfa0a135339fe3e84e58258b\": container with ID starting with 8a5fe3982a7495bb52009a917224a3e8e43967c5bfa0a135339fe3e84e58258b not found: ID does not exist" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.462234 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.464207 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.492460 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.594858 4791 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04c2d5ce-ba0d-4b53-8396-ee56f79b1c81-config-data\") pod \"glance-default-external-api-0\" (UID: \"04c2d5ce-ba0d-4b53-8396-ee56f79b1c81\") " pod="openstack/glance-default-external-api-0" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.594956 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9qzh6\" (UniqueName: \"kubernetes.io/projected/04c2d5ce-ba0d-4b53-8396-ee56f79b1c81-kube-api-access-9qzh6\") pod \"glance-default-external-api-0\" (UID: \"04c2d5ce-ba0d-4b53-8396-ee56f79b1c81\") " pod="openstack/glance-default-external-api-0" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.594982 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/04c2d5ce-ba0d-4b53-8396-ee56f79b1c81-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"04c2d5ce-ba0d-4b53-8396-ee56f79b1c81\") " pod="openstack/glance-default-external-api-0" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.595246 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-4472ce99-9885-40a2-bc85-66819bd1580e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4472ce99-9885-40a2-bc85-66819bd1580e\") pod \"glance-default-external-api-0\" (UID: \"04c2d5ce-ba0d-4b53-8396-ee56f79b1c81\") " pod="openstack/glance-default-external-api-0" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.595339 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/04c2d5ce-ba0d-4b53-8396-ee56f79b1c81-logs\") pod \"glance-default-external-api-0\" (UID: \"04c2d5ce-ba0d-4b53-8396-ee56f79b1c81\") " pod="openstack/glance-default-external-api-0" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.595397 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/04c2d5ce-ba0d-4b53-8396-ee56f79b1c81-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"04c2d5ce-ba0d-4b53-8396-ee56f79b1c81\") " pod="openstack/glance-default-external-api-0" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.595447 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04c2d5ce-ba0d-4b53-8396-ee56f79b1c81-scripts\") pod \"glance-default-external-api-0\" (UID: \"04c2d5ce-ba0d-4b53-8396-ee56f79b1c81\") " pod="openstack/glance-default-external-api-0" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.595534 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04c2d5ce-ba0d-4b53-8396-ee56f79b1c81-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"04c2d5ce-ba0d-4b53-8396-ee56f79b1c81\") " pod="openstack/glance-default-external-api-0" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.697670 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9qzh6\" (UniqueName: \"kubernetes.io/projected/04c2d5ce-ba0d-4b53-8396-ee56f79b1c81-kube-api-access-9qzh6\") pod \"glance-default-external-api-0\" (UID: 
\"04c2d5ce-ba0d-4b53-8396-ee56f79b1c81\") " pod="openstack/glance-default-external-api-0" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.697734 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/04c2d5ce-ba0d-4b53-8396-ee56f79b1c81-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"04c2d5ce-ba0d-4b53-8396-ee56f79b1c81\") " pod="openstack/glance-default-external-api-0" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.697786 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-4472ce99-9885-40a2-bc85-66819bd1580e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4472ce99-9885-40a2-bc85-66819bd1580e\") pod \"glance-default-external-api-0\" (UID: \"04c2d5ce-ba0d-4b53-8396-ee56f79b1c81\") " pod="openstack/glance-default-external-api-0" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.697831 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/04c2d5ce-ba0d-4b53-8396-ee56f79b1c81-logs\") pod \"glance-default-external-api-0\" (UID: \"04c2d5ce-ba0d-4b53-8396-ee56f79b1c81\") " pod="openstack/glance-default-external-api-0" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.697867 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/04c2d5ce-ba0d-4b53-8396-ee56f79b1c81-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"04c2d5ce-ba0d-4b53-8396-ee56f79b1c81\") " pod="openstack/glance-default-external-api-0" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.697892 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04c2d5ce-ba0d-4b53-8396-ee56f79b1c81-scripts\") pod \"glance-default-external-api-0\" (UID: \"04c2d5ce-ba0d-4b53-8396-ee56f79b1c81\") " pod="openstack/glance-default-external-api-0" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.697987 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04c2d5ce-ba0d-4b53-8396-ee56f79b1c81-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"04c2d5ce-ba0d-4b53-8396-ee56f79b1c81\") " pod="openstack/glance-default-external-api-0" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.698330 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/04c2d5ce-ba0d-4b53-8396-ee56f79b1c81-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"04c2d5ce-ba0d-4b53-8396-ee56f79b1c81\") " pod="openstack/glance-default-external-api-0" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.698367 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/04c2d5ce-ba0d-4b53-8396-ee56f79b1c81-logs\") pod \"glance-default-external-api-0\" (UID: \"04c2d5ce-ba0d-4b53-8396-ee56f79b1c81\") " pod="openstack/glance-default-external-api-0" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.698860 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04c2d5ce-ba0d-4b53-8396-ee56f79b1c81-config-data\") pod \"glance-default-external-api-0\" (UID: \"04c2d5ce-ba0d-4b53-8396-ee56f79b1c81\") " pod="openstack/glance-default-external-api-0" 
Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.703714 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/04c2d5ce-ba0d-4b53-8396-ee56f79b1c81-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"04c2d5ce-ba0d-4b53-8396-ee56f79b1c81\") " pod="openstack/glance-default-external-api-0" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.704426 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04c2d5ce-ba0d-4b53-8396-ee56f79b1c81-config-data\") pod \"glance-default-external-api-0\" (UID: \"04c2d5ce-ba0d-4b53-8396-ee56f79b1c81\") " pod="openstack/glance-default-external-api-0" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.704828 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04c2d5ce-ba0d-4b53-8396-ee56f79b1c81-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"04c2d5ce-ba0d-4b53-8396-ee56f79b1c81\") " pod="openstack/glance-default-external-api-0" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.705765 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04c2d5ce-ba0d-4b53-8396-ee56f79b1c81-scripts\") pod \"glance-default-external-api-0\" (UID: \"04c2d5ce-ba0d-4b53-8396-ee56f79b1c81\") " pod="openstack/glance-default-external-api-0" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.718814 4791 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.718864 4791 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-4472ce99-9885-40a2-bc85-66819bd1580e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4472ce99-9885-40a2-bc85-66819bd1580e\") pod \"glance-default-external-api-0\" (UID: \"04c2d5ce-ba0d-4b53-8396-ee56f79b1c81\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/62820ba3c35aec4dc771d82e657fcb43f04a79384f8e10003d10025b9d199fc2/globalmount\"" pod="openstack/glance-default-external-api-0" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.718885 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9qzh6\" (UniqueName: \"kubernetes.io/projected/04c2d5ce-ba0d-4b53-8396-ee56f79b1c81-kube-api-access-9qzh6\") pod \"glance-default-external-api-0\" (UID: \"04c2d5ce-ba0d-4b53-8396-ee56f79b1c81\") " pod="openstack/glance-default-external-api-0" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.763033 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-4472ce99-9885-40a2-bc85-66819bd1580e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4472ce99-9885-40a2-bc85-66819bd1580e\") pod \"glance-default-external-api-0\" (UID: \"04c2d5ce-ba0d-4b53-8396-ee56f79b1c81\") " pod="openstack/glance-default-external-api-0" Feb 18 00:58:32 crc kubenswrapper[4791]: I0218 00:58:32.783396 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Feb 18 00:58:33 crc kubenswrapper[4791]: I0218 00:58:33.089123 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0c44ac4f-9963-409d-af44-e14663f07248" path="/var/lib/kubelet/pods/0c44ac4f-9963-409d-af44-e14663f07248/volumes" Feb 18 00:58:33 crc kubenswrapper[4791]: I0218 00:58:33.090756 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="718be5cf-bdd1-4639-9fc5-343a770f1244" path="/var/lib/kubelet/pods/718be5cf-bdd1-4639-9fc5-343a770f1244/volumes" Feb 18 00:58:33 crc kubenswrapper[4791]: I0218 00:58:33.091826 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f729e438-68f7-48b7-9a93-e54e1da4c045" path="/var/lib/kubelet/pods/f729e438-68f7-48b7-9a93-e54e1da4c045/volumes" Feb 18 00:58:33 crc kubenswrapper[4791]: I0218 00:58:33.174996 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:58:33 crc kubenswrapper[4791]: I0218 00:58:33.357255 4791 generic.go:334] "Generic (PLEG): container finished" podID="88f5c47b-706b-4d5b-9822-56c13e90a7a9" containerID="88ea5f88ed7bfed3311bc6d29fa7c4418d1dd0ebeb5bc50dfe90c18b8aa803a7" exitCode=0 Feb 18 00:58:33 crc kubenswrapper[4791]: I0218 00:58:33.357339 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"88f5c47b-706b-4d5b-9822-56c13e90a7a9","Type":"ContainerDied","Data":"88ea5f88ed7bfed3311bc6d29fa7c4418d1dd0ebeb5bc50dfe90c18b8aa803a7"} Feb 18 00:58:33 crc kubenswrapper[4791]: I0218 00:58:33.360726 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5","Type":"ContainerStarted","Data":"c9d8f640beca5849fc7c2cf9f71df3cbf5fb52333a0f7be2d960aaa1a59d31bb"} Feb 18 00:58:33 crc kubenswrapper[4791]: I0218 00:58:33.360785 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5","Type":"ContainerStarted","Data":"b53aac7510481234569aea12434434d2d121760fea4778ee3a6e8564a5417515"} Feb 18 00:58:33 crc kubenswrapper[4791]: I0218 00:58:33.464659 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Feb 18 00:58:33 crc kubenswrapper[4791]: I0218 00:58:33.654133 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 18 00:58:33 crc kubenswrapper[4791]: I0218 00:58:33.721505 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/88f5c47b-706b-4d5b-9822-56c13e90a7a9-logs\") pod \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\" (UID: \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\") " Feb 18 00:58:33 crc kubenswrapper[4791]: I0218 00:58:33.721590 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/88f5c47b-706b-4d5b-9822-56c13e90a7a9-internal-tls-certs\") pod \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\" (UID: \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\") " Feb 18 00:58:33 crc kubenswrapper[4791]: I0218 00:58:33.721682 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88f5c47b-706b-4d5b-9822-56c13e90a7a9-combined-ca-bundle\") pod \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\" (UID: \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\") " Feb 18 00:58:33 crc kubenswrapper[4791]: I0218 00:58:33.721728 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/88f5c47b-706b-4d5b-9822-56c13e90a7a9-scripts\") pod \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\" (UID: \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\") " Feb 18 00:58:33 crc kubenswrapper[4791]: I0218 00:58:33.721753 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q2nsc\" (UniqueName: \"kubernetes.io/projected/88f5c47b-706b-4d5b-9822-56c13e90a7a9-kube-api-access-q2nsc\") pod \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\" (UID: \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\") " Feb 18 00:58:33 crc kubenswrapper[4791]: I0218 00:58:33.722229 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/88f5c47b-706b-4d5b-9822-56c13e90a7a9-logs" (OuterVolumeSpecName: "logs") pod "88f5c47b-706b-4d5b-9822-56c13e90a7a9" (UID: "88f5c47b-706b-4d5b-9822-56c13e90a7a9"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:58:33 crc kubenswrapper[4791]: I0218 00:58:33.722706 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\") pod \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\" (UID: \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\") " Feb 18 00:58:33 crc kubenswrapper[4791]: I0218 00:58:33.722771 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/88f5c47b-706b-4d5b-9822-56c13e90a7a9-httpd-run\") pod \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\" (UID: \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\") " Feb 18 00:58:33 crc kubenswrapper[4791]: I0218 00:58:33.722896 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88f5c47b-706b-4d5b-9822-56c13e90a7a9-config-data\") pod \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\" (UID: \"88f5c47b-706b-4d5b-9822-56c13e90a7a9\") " Feb 18 00:58:33 crc kubenswrapper[4791]: I0218 00:58:33.723425 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/88f5c47b-706b-4d5b-9822-56c13e90a7a9-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "88f5c47b-706b-4d5b-9822-56c13e90a7a9" (UID: "88f5c47b-706b-4d5b-9822-56c13e90a7a9"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:58:33 crc kubenswrapper[4791]: I0218 00:58:33.724220 4791 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/88f5c47b-706b-4d5b-9822-56c13e90a7a9-httpd-run\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:33 crc kubenswrapper[4791]: I0218 00:58:33.724312 4791 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/88f5c47b-706b-4d5b-9822-56c13e90a7a9-logs\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:33 crc kubenswrapper[4791]: I0218 00:58:33.728623 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/88f5c47b-706b-4d5b-9822-56c13e90a7a9-kube-api-access-q2nsc" (OuterVolumeSpecName: "kube-api-access-q2nsc") pod "88f5c47b-706b-4d5b-9822-56c13e90a7a9" (UID: "88f5c47b-706b-4d5b-9822-56c13e90a7a9"). InnerVolumeSpecName "kube-api-access-q2nsc". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:58:33 crc kubenswrapper[4791]: I0218 00:58:33.732027 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88f5c47b-706b-4d5b-9822-56c13e90a7a9-scripts" (OuterVolumeSpecName: "scripts") pod "88f5c47b-706b-4d5b-9822-56c13e90a7a9" (UID: "88f5c47b-706b-4d5b-9822-56c13e90a7a9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:33 crc kubenswrapper[4791]: I0218 00:58:33.772448 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5" (OuterVolumeSpecName: "glance") pod "88f5c47b-706b-4d5b-9822-56c13e90a7a9" (UID: "88f5c47b-706b-4d5b-9822-56c13e90a7a9"). InnerVolumeSpecName "pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Feb 18 00:58:33 crc kubenswrapper[4791]: I0218 00:58:33.778028 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88f5c47b-706b-4d5b-9822-56c13e90a7a9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "88f5c47b-706b-4d5b-9822-56c13e90a7a9" (UID: "88f5c47b-706b-4d5b-9822-56c13e90a7a9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:33 crc kubenswrapper[4791]: I0218 00:58:33.816899 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88f5c47b-706b-4d5b-9822-56c13e90a7a9-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "88f5c47b-706b-4d5b-9822-56c13e90a7a9" (UID: "88f5c47b-706b-4d5b-9822-56c13e90a7a9"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:33 crc kubenswrapper[4791]: I0218 00:58:33.846036 4791 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/88f5c47b-706b-4d5b-9822-56c13e90a7a9-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:33 crc kubenswrapper[4791]: I0218 00:58:33.846081 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88f5c47b-706b-4d5b-9822-56c13e90a7a9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:33 crc kubenswrapper[4791]: I0218 00:58:33.846095 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q2nsc\" (UniqueName: \"kubernetes.io/projected/88f5c47b-706b-4d5b-9822-56c13e90a7a9-kube-api-access-q2nsc\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:33 crc kubenswrapper[4791]: I0218 00:58:33.846111 4791 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/88f5c47b-706b-4d5b-9822-56c13e90a7a9-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:33 crc kubenswrapper[4791]: I0218 00:58:33.852681 4791 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\") on node \"crc\" " Feb 18 00:58:33 crc kubenswrapper[4791]: I0218 00:58:33.858643 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88f5c47b-706b-4d5b-9822-56c13e90a7a9-config-data" (OuterVolumeSpecName: "config-data") pod "88f5c47b-706b-4d5b-9822-56c13e90a7a9" (UID: "88f5c47b-706b-4d5b-9822-56c13e90a7a9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:33 crc kubenswrapper[4791]: I0218 00:58:33.899628 4791 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Feb 18 00:58:33 crc kubenswrapper[4791]: I0218 00:58:33.899786 4791 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5") on node "crc" Feb 18 00:58:33 crc kubenswrapper[4791]: I0218 00:58:33.955328 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88f5c47b-706b-4d5b-9822-56c13e90a7a9-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:33 crc kubenswrapper[4791]: I0218 00:58:33.955362 4791 reconciler_common.go:293] "Volume detached for volume \"pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.383847 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"04c2d5ce-ba0d-4b53-8396-ee56f79b1c81","Type":"ContainerStarted","Data":"fec1ea4abd5dd555765c3bea252c4aff5851b49df855db2ac9ef8d10693216f3"} Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.384309 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"04c2d5ce-ba0d-4b53-8396-ee56f79b1c81","Type":"ContainerStarted","Data":"84dbfec9d2b1566983c670699eca34e904219cb57d3c5070e04b00b3e96ee0f6"} Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.387691 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5","Type":"ContainerStarted","Data":"352e25e8fc7002b58d67c51919b1d6f5f01d829369bdc85460aa0cacaca83d63"} Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.394099 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"88f5c47b-706b-4d5b-9822-56c13e90a7a9","Type":"ContainerDied","Data":"48446f99549ad5f7da62ca57e2e129f93b731f12ecae9599ac0df2a974fb2a15"} Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.394221 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.394230 4791 scope.go:117] "RemoveContainer" containerID="88ea5f88ed7bfed3311bc6d29fa7c4418d1dd0ebeb5bc50dfe90c18b8aa803a7" Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.512878 4791 scope.go:117] "RemoveContainer" containerID="582a216500d9a6b639bae64411291d499bbc933af54e6099e2a8928ff963d396" Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.529204 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.549911 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.567702 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 18 00:58:34 crc kubenswrapper[4791]: E0218 00:58:34.568332 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88f5c47b-706b-4d5b-9822-56c13e90a7a9" containerName="glance-log" Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.568347 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="88f5c47b-706b-4d5b-9822-56c13e90a7a9" containerName="glance-log" Feb 18 00:58:34 crc kubenswrapper[4791]: E0218 00:58:34.568363 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88f5c47b-706b-4d5b-9822-56c13e90a7a9" containerName="glance-httpd" Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.568368 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="88f5c47b-706b-4d5b-9822-56c13e90a7a9" containerName="glance-httpd" Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.568559 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="88f5c47b-706b-4d5b-9822-56c13e90a7a9" containerName="glance-log" Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.568573 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="88f5c47b-706b-4d5b-9822-56c13e90a7a9" containerName="glance-httpd" Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.569983 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.573897 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.574095 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.585873 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.674138 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a7ad9d2-ee92-4248-be3d-e687312e819f-config-data\") pod \"glance-default-internal-api-0\" (UID: \"5a7ad9d2-ee92-4248-be3d-e687312e819f\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.674544 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-579x6\" (UniqueName: \"kubernetes.io/projected/5a7ad9d2-ee92-4248-be3d-e687312e819f-kube-api-access-579x6\") pod \"glance-default-internal-api-0\" (UID: \"5a7ad9d2-ee92-4248-be3d-e687312e819f\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.674740 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\") pod \"glance-default-internal-api-0\" (UID: \"5a7ad9d2-ee92-4248-be3d-e687312e819f\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.674816 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a7ad9d2-ee92-4248-be3d-e687312e819f-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"5a7ad9d2-ee92-4248-be3d-e687312e819f\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.674882 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a7ad9d2-ee92-4248-be3d-e687312e819f-scripts\") pod \"glance-default-internal-api-0\" (UID: \"5a7ad9d2-ee92-4248-be3d-e687312e819f\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.674925 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5a7ad9d2-ee92-4248-be3d-e687312e819f-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"5a7ad9d2-ee92-4248-be3d-e687312e819f\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.675213 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5a7ad9d2-ee92-4248-be3d-e687312e819f-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"5a7ad9d2-ee92-4248-be3d-e687312e819f\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 
00:58:34.675458 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5a7ad9d2-ee92-4248-be3d-e687312e819f-logs\") pod \"glance-default-internal-api-0\" (UID: \"5a7ad9d2-ee92-4248-be3d-e687312e819f\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.777199 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5a7ad9d2-ee92-4248-be3d-e687312e819f-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"5a7ad9d2-ee92-4248-be3d-e687312e819f\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.777348 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5a7ad9d2-ee92-4248-be3d-e687312e819f-logs\") pod \"glance-default-internal-api-0\" (UID: \"5a7ad9d2-ee92-4248-be3d-e687312e819f\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.777420 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a7ad9d2-ee92-4248-be3d-e687312e819f-config-data\") pod \"glance-default-internal-api-0\" (UID: \"5a7ad9d2-ee92-4248-be3d-e687312e819f\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.777485 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-579x6\" (UniqueName: \"kubernetes.io/projected/5a7ad9d2-ee92-4248-be3d-e687312e819f-kube-api-access-579x6\") pod \"glance-default-internal-api-0\" (UID: \"5a7ad9d2-ee92-4248-be3d-e687312e819f\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.777568 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\") pod \"glance-default-internal-api-0\" (UID: \"5a7ad9d2-ee92-4248-be3d-e687312e819f\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.777617 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a7ad9d2-ee92-4248-be3d-e687312e819f-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"5a7ad9d2-ee92-4248-be3d-e687312e819f\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.777664 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a7ad9d2-ee92-4248-be3d-e687312e819f-scripts\") pod \"glance-default-internal-api-0\" (UID: \"5a7ad9d2-ee92-4248-be3d-e687312e819f\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.777702 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5a7ad9d2-ee92-4248-be3d-e687312e819f-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"5a7ad9d2-ee92-4248-be3d-e687312e819f\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.777793 4791 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5a7ad9d2-ee92-4248-be3d-e687312e819f-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"5a7ad9d2-ee92-4248-be3d-e687312e819f\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.779486 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5a7ad9d2-ee92-4248-be3d-e687312e819f-logs\") pod \"glance-default-internal-api-0\" (UID: \"5a7ad9d2-ee92-4248-be3d-e687312e819f\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.782326 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a7ad9d2-ee92-4248-be3d-e687312e819f-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"5a7ad9d2-ee92-4248-be3d-e687312e819f\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.784335 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5a7ad9d2-ee92-4248-be3d-e687312e819f-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"5a7ad9d2-ee92-4248-be3d-e687312e819f\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.784463 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a7ad9d2-ee92-4248-be3d-e687312e819f-scripts\") pod \"glance-default-internal-api-0\" (UID: \"5a7ad9d2-ee92-4248-be3d-e687312e819f\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.786140 4791 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.786185 4791 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\") pod \"glance-default-internal-api-0\" (UID: \"5a7ad9d2-ee92-4248-be3d-e687312e819f\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/9790f76315acc35c8fd6eab9a9221b467468b56b96e913c660367fc7d70a609d/globalmount\"" pod="openstack/glance-default-internal-api-0" Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.786560 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a7ad9d2-ee92-4248-be3d-e687312e819f-config-data\") pod \"glance-default-internal-api-0\" (UID: \"5a7ad9d2-ee92-4248-be3d-e687312e819f\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.797241 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-579x6\" (UniqueName: \"kubernetes.io/projected/5a7ad9d2-ee92-4248-be3d-e687312e819f-kube-api-access-579x6\") pod \"glance-default-internal-api-0\" (UID: \"5a7ad9d2-ee92-4248-be3d-e687312e819f\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.844291 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b33f130-f1b7-4e09-98c7-cb727d392dd5\") pod \"glance-default-internal-api-0\" (UID: \"5a7ad9d2-ee92-4248-be3d-e687312e819f\") " pod="openstack/glance-default-internal-api-0" Feb 18 00:58:34 crc kubenswrapper[4791]: I0218 00:58:34.906077 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Feb 18 00:58:35 crc kubenswrapper[4791]: I0218 00:58:35.078578 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="88f5c47b-706b-4d5b-9822-56c13e90a7a9" path="/var/lib/kubelet/pods/88f5c47b-706b-4d5b-9822-56c13e90a7a9/volumes" Feb 18 00:58:35 crc kubenswrapper[4791]: I0218 00:58:35.408705 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5","Type":"ContainerStarted","Data":"a98ed893453365edaef2dda995ba893eeabde69ea039e3e019401d84725917d9"} Feb 18 00:58:35 crc kubenswrapper[4791]: I0218 00:58:35.413652 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"04c2d5ce-ba0d-4b53-8396-ee56f79b1c81","Type":"ContainerStarted","Data":"c7deee4f054c2a67ab1b0dd7665078991d89594efa383e42ea2837f5f71ce756"} Feb 18 00:58:35 crc kubenswrapper[4791]: I0218 00:58:35.442574 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.442546072 podStartE2EDuration="3.442546072s" podCreationTimestamp="2026-02-18 00:58:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:58:35.435744012 +0000 UTC m=+1457.003757182" watchObservedRunningTime="2026-02-18 00:58:35.442546072 +0000 UTC m=+1457.010559242" Feb 18 00:58:35 crc kubenswrapper[4791]: I0218 00:58:35.454672 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wvm6h" podUID="90ea96e9-ad1e-49ab-b9f9-b60d2142aacd" containerName="registry-server" probeResult="failure" output=< Feb 18 00:58:35 crc kubenswrapper[4791]: timeout: failed to connect service ":50051" within 1s Feb 18 00:58:35 crc kubenswrapper[4791]: > Feb 18 00:58:35 crc kubenswrapper[4791]: I0218 00:58:35.533591 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Feb 18 00:58:35 crc kubenswrapper[4791]: W0218 00:58:35.568015 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5a7ad9d2_ee92_4248_be3d_e687312e819f.slice/crio-dd9b93aa019ed363addbb148dfce35e90991ef934c32b5141953ee31fc69dbd1 WatchSource:0}: Error finding container dd9b93aa019ed363addbb148dfce35e90991ef934c32b5141953ee31fc69dbd1: Status 404 returned error can't find the container with id dd9b93aa019ed363addbb148dfce35e90991ef934c32b5141953ee31fc69dbd1 Feb 18 00:58:36 crc kubenswrapper[4791]: I0218 00:58:36.443267 4791 generic.go:334] "Generic (PLEG): container finished" podID="f1d1a31c-1309-4888-8122-16d6be151b19" containerID="047f05aa57fed5d29f63ce2579a8d0fdef33b00590b0538db1f3a0405b7252eb" exitCode=0 Feb 18 00:58:36 crc kubenswrapper[4791]: I0218 00:58:36.443339 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-6jpnb" event={"ID":"f1d1a31c-1309-4888-8122-16d6be151b19","Type":"ContainerDied","Data":"047f05aa57fed5d29f63ce2579a8d0fdef33b00590b0538db1f3a0405b7252eb"} Feb 18 00:58:36 crc kubenswrapper[4791]: I0218 00:58:36.453301 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5","Type":"ContainerStarted","Data":"bb81ec1b470b4c22fbe2e64e7756de621c603a68fc5c78cb4e5e3381cd64ccd1"} Feb 18 00:58:36 crc kubenswrapper[4791]: I0218 
00:58:36.453484 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5" containerName="ceilometer-central-agent" containerID="cri-o://c9d8f640beca5849fc7c2cf9f71df3cbf5fb52333a0f7be2d960aaa1a59d31bb" gracePeriod=30 Feb 18 00:58:36 crc kubenswrapper[4791]: I0218 00:58:36.453500 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5" containerName="sg-core" containerID="cri-o://a98ed893453365edaef2dda995ba893eeabde69ea039e3e019401d84725917d9" gracePeriod=30 Feb 18 00:58:36 crc kubenswrapper[4791]: I0218 00:58:36.453576 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5" containerName="proxy-httpd" containerID="cri-o://bb81ec1b470b4c22fbe2e64e7756de621c603a68fc5c78cb4e5e3381cd64ccd1" gracePeriod=30 Feb 18 00:58:36 crc kubenswrapper[4791]: I0218 00:58:36.453638 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Feb 18 00:58:36 crc kubenswrapper[4791]: I0218 00:58:36.453598 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5" containerName="ceilometer-notification-agent" containerID="cri-o://352e25e8fc7002b58d67c51919b1d6f5f01d829369bdc85460aa0cacaca83d63" gracePeriod=30 Feb 18 00:58:36 crc kubenswrapper[4791]: I0218 00:58:36.479612 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5a7ad9d2-ee92-4248-be3d-e687312e819f","Type":"ContainerStarted","Data":"d40834d28c75126ac8fc555a5b978d219259f623b17aa362007d61917a2bfd69"} Feb 18 00:58:36 crc kubenswrapper[4791]: I0218 00:58:36.479652 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5a7ad9d2-ee92-4248-be3d-e687312e819f","Type":"ContainerStarted","Data":"dd9b93aa019ed363addbb148dfce35e90991ef934c32b5141953ee31fc69dbd1"} Feb 18 00:58:36 crc kubenswrapper[4791]: I0218 00:58:36.482050 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.182134951 podStartE2EDuration="5.482036373s" podCreationTimestamp="2026-02-18 00:58:31 +0000 UTC" firstStartedPulling="2026-02-18 00:58:32.459277023 +0000 UTC m=+1454.027290193" lastFinishedPulling="2026-02-18 00:58:35.759178445 +0000 UTC m=+1457.327191615" observedRunningTime="2026-02-18 00:58:36.481333951 +0000 UTC m=+1458.049347121" watchObservedRunningTime="2026-02-18 00:58:36.482036373 +0000 UTC m=+1458.050049543" Feb 18 00:58:37 crc kubenswrapper[4791]: I0218 00:58:37.491663 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5a7ad9d2-ee92-4248-be3d-e687312e819f","Type":"ContainerStarted","Data":"f3c21ca34298386fbd4fa3f59c747f2ddfc32d656ee5a33acc5abeb80ba8bdc6"} Feb 18 00:58:37 crc kubenswrapper[4791]: I0218 00:58:37.494642 4791 generic.go:334] "Generic (PLEG): container finished" podID="8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5" containerID="bb81ec1b470b4c22fbe2e64e7756de621c603a68fc5c78cb4e5e3381cd64ccd1" exitCode=0 Feb 18 00:58:37 crc kubenswrapper[4791]: I0218 00:58:37.494762 4791 generic.go:334] "Generic (PLEG): container finished" podID="8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5" 
containerID="a98ed893453365edaef2dda995ba893eeabde69ea039e3e019401d84725917d9" exitCode=2 Feb 18 00:58:37 crc kubenswrapper[4791]: I0218 00:58:37.494824 4791 generic.go:334] "Generic (PLEG): container finished" podID="8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5" containerID="352e25e8fc7002b58d67c51919b1d6f5f01d829369bdc85460aa0cacaca83d63" exitCode=0 Feb 18 00:58:37 crc kubenswrapper[4791]: I0218 00:58:37.495063 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5","Type":"ContainerDied","Data":"bb81ec1b470b4c22fbe2e64e7756de621c603a68fc5c78cb4e5e3381cd64ccd1"} Feb 18 00:58:37 crc kubenswrapper[4791]: I0218 00:58:37.495200 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5","Type":"ContainerDied","Data":"a98ed893453365edaef2dda995ba893eeabde69ea039e3e019401d84725917d9"} Feb 18 00:58:37 crc kubenswrapper[4791]: I0218 00:58:37.495285 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5","Type":"ContainerDied","Data":"352e25e8fc7002b58d67c51919b1d6f5f01d829369bdc85460aa0cacaca83d63"} Feb 18 00:58:37 crc kubenswrapper[4791]: I0218 00:58:37.523445 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.523423082 podStartE2EDuration="3.523423082s" podCreationTimestamp="2026-02-18 00:58:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:58:37.511784202 +0000 UTC m=+1459.079797372" watchObservedRunningTime="2026-02-18 00:58:37.523423082 +0000 UTC m=+1459.091436252" Feb 18 00:58:37 crc kubenswrapper[4791]: I0218 00:58:37.920849 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-6jpnb" Feb 18 00:58:37 crc kubenswrapper[4791]: I0218 00:58:37.960900 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f1d1a31c-1309-4888-8122-16d6be151b19-scripts\") pod \"f1d1a31c-1309-4888-8122-16d6be151b19\" (UID: \"f1d1a31c-1309-4888-8122-16d6be151b19\") " Feb 18 00:58:37 crc kubenswrapper[4791]: I0218 00:58:37.960965 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5tgs9\" (UniqueName: \"kubernetes.io/projected/f1d1a31c-1309-4888-8122-16d6be151b19-kube-api-access-5tgs9\") pod \"f1d1a31c-1309-4888-8122-16d6be151b19\" (UID: \"f1d1a31c-1309-4888-8122-16d6be151b19\") " Feb 18 00:58:37 crc kubenswrapper[4791]: I0218 00:58:37.961024 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1d1a31c-1309-4888-8122-16d6be151b19-config-data\") pod \"f1d1a31c-1309-4888-8122-16d6be151b19\" (UID: \"f1d1a31c-1309-4888-8122-16d6be151b19\") " Feb 18 00:58:37 crc kubenswrapper[4791]: I0218 00:58:37.961137 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1d1a31c-1309-4888-8122-16d6be151b19-combined-ca-bundle\") pod \"f1d1a31c-1309-4888-8122-16d6be151b19\" (UID: \"f1d1a31c-1309-4888-8122-16d6be151b19\") " Feb 18 00:58:37 crc kubenswrapper[4791]: I0218 00:58:37.966711 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1d1a31c-1309-4888-8122-16d6be151b19-scripts" (OuterVolumeSpecName: "scripts") pod "f1d1a31c-1309-4888-8122-16d6be151b19" (UID: "f1d1a31c-1309-4888-8122-16d6be151b19"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:37 crc kubenswrapper[4791]: I0218 00:58:37.967274 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f1d1a31c-1309-4888-8122-16d6be151b19-kube-api-access-5tgs9" (OuterVolumeSpecName: "kube-api-access-5tgs9") pod "f1d1a31c-1309-4888-8122-16d6be151b19" (UID: "f1d1a31c-1309-4888-8122-16d6be151b19"). InnerVolumeSpecName "kube-api-access-5tgs9". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:58:37 crc kubenswrapper[4791]: I0218 00:58:37.992895 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1d1a31c-1309-4888-8122-16d6be151b19-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f1d1a31c-1309-4888-8122-16d6be151b19" (UID: "f1d1a31c-1309-4888-8122-16d6be151b19"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:38 crc kubenswrapper[4791]: I0218 00:58:38.017632 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1d1a31c-1309-4888-8122-16d6be151b19-config-data" (OuterVolumeSpecName: "config-data") pod "f1d1a31c-1309-4888-8122-16d6be151b19" (UID: "f1d1a31c-1309-4888-8122-16d6be151b19"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:38 crc kubenswrapper[4791]: I0218 00:58:38.064173 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1d1a31c-1309-4888-8122-16d6be151b19-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:38 crc kubenswrapper[4791]: I0218 00:58:38.064209 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1d1a31c-1309-4888-8122-16d6be151b19-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:38 crc kubenswrapper[4791]: I0218 00:58:38.064219 4791 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f1d1a31c-1309-4888-8122-16d6be151b19-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:38 crc kubenswrapper[4791]: I0218 00:58:38.064230 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5tgs9\" (UniqueName: \"kubernetes.io/projected/f1d1a31c-1309-4888-8122-16d6be151b19-kube-api-access-5tgs9\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:38 crc kubenswrapper[4791]: I0218 00:58:38.505708 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-6jpnb" event={"ID":"f1d1a31c-1309-4888-8122-16d6be151b19","Type":"ContainerDied","Data":"a98ad8fec5c17f8fff2263bf70bf321a46baa4608892afcf8d7e2bdaa389086d"} Feb 18 00:58:38 crc kubenswrapper[4791]: I0218 00:58:38.506057 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a98ad8fec5c17f8fff2263bf70bf321a46baa4608892afcf8d7e2bdaa389086d" Feb 18 00:58:38 crc kubenswrapper[4791]: I0218 00:58:38.505750 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-6jpnb" Feb 18 00:58:38 crc kubenswrapper[4791]: I0218 00:58:38.571206 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Feb 18 00:58:38 crc kubenswrapper[4791]: E0218 00:58:38.571959 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1d1a31c-1309-4888-8122-16d6be151b19" containerName="nova-cell0-conductor-db-sync" Feb 18 00:58:38 crc kubenswrapper[4791]: I0218 00:58:38.571976 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1d1a31c-1309-4888-8122-16d6be151b19" containerName="nova-cell0-conductor-db-sync" Feb 18 00:58:38 crc kubenswrapper[4791]: I0218 00:58:38.572376 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1d1a31c-1309-4888-8122-16d6be151b19" containerName="nova-cell0-conductor-db-sync" Feb 18 00:58:38 crc kubenswrapper[4791]: I0218 00:58:38.573125 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Feb 18 00:58:38 crc kubenswrapper[4791]: I0218 00:58:38.576666 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Feb 18 00:58:38 crc kubenswrapper[4791]: I0218 00:58:38.576669 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-cpczt" Feb 18 00:58:38 crc kubenswrapper[4791]: I0218 00:58:38.588146 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Feb 18 00:58:38 crc kubenswrapper[4791]: I0218 00:58:38.675688 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/352e15b6-33bb-4c81-9ace-0450f934eec1-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"352e15b6-33bb-4c81-9ace-0450f934eec1\") " pod="openstack/nova-cell0-conductor-0" Feb 18 00:58:38 crc kubenswrapper[4791]: I0218 00:58:38.675820 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mpn6p\" (UniqueName: \"kubernetes.io/projected/352e15b6-33bb-4c81-9ace-0450f934eec1-kube-api-access-mpn6p\") pod \"nova-cell0-conductor-0\" (UID: \"352e15b6-33bb-4c81-9ace-0450f934eec1\") " pod="openstack/nova-cell0-conductor-0" Feb 18 00:58:38 crc kubenswrapper[4791]: I0218 00:58:38.675848 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/352e15b6-33bb-4c81-9ace-0450f934eec1-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"352e15b6-33bb-4c81-9ace-0450f934eec1\") " pod="openstack/nova-cell0-conductor-0" Feb 18 00:58:38 crc kubenswrapper[4791]: I0218 00:58:38.777914 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mpn6p\" (UniqueName: \"kubernetes.io/projected/352e15b6-33bb-4c81-9ace-0450f934eec1-kube-api-access-mpn6p\") pod \"nova-cell0-conductor-0\" (UID: \"352e15b6-33bb-4c81-9ace-0450f934eec1\") " pod="openstack/nova-cell0-conductor-0" Feb 18 00:58:38 crc kubenswrapper[4791]: I0218 00:58:38.777964 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/352e15b6-33bb-4c81-9ace-0450f934eec1-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"352e15b6-33bb-4c81-9ace-0450f934eec1\") " pod="openstack/nova-cell0-conductor-0" Feb 18 00:58:38 crc kubenswrapper[4791]: I0218 00:58:38.778142 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/352e15b6-33bb-4c81-9ace-0450f934eec1-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"352e15b6-33bb-4c81-9ace-0450f934eec1\") " pod="openstack/nova-cell0-conductor-0" Feb 18 00:58:38 crc kubenswrapper[4791]: I0218 00:58:38.783216 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/352e15b6-33bb-4c81-9ace-0450f934eec1-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"352e15b6-33bb-4c81-9ace-0450f934eec1\") " pod="openstack/nova-cell0-conductor-0" Feb 18 00:58:38 crc kubenswrapper[4791]: I0218 00:58:38.793215 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/352e15b6-33bb-4c81-9ace-0450f934eec1-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" 
(UID: \"352e15b6-33bb-4c81-9ace-0450f934eec1\") " pod="openstack/nova-cell0-conductor-0" Feb 18 00:58:38 crc kubenswrapper[4791]: I0218 00:58:38.797928 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mpn6p\" (UniqueName: \"kubernetes.io/projected/352e15b6-33bb-4c81-9ace-0450f934eec1-kube-api-access-mpn6p\") pod \"nova-cell0-conductor-0\" (UID: \"352e15b6-33bb-4c81-9ace-0450f934eec1\") " pod="openstack/nova-cell0-conductor-0" Feb 18 00:58:38 crc kubenswrapper[4791]: I0218 00:58:38.888839 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Feb 18 00:58:39 crc kubenswrapper[4791]: I0218 00:58:39.340932 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Feb 18 00:58:39 crc kubenswrapper[4791]: I0218 00:58:39.519637 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"352e15b6-33bb-4c81-9ace-0450f934eec1","Type":"ContainerStarted","Data":"ed7883995e5dd8066b7c4f7e538e3fb068ea601e1090c58f5633562504414efb"} Feb 18 00:58:40 crc kubenswrapper[4791]: I0218 00:58:40.530858 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"352e15b6-33bb-4c81-9ace-0450f934eec1","Type":"ContainerStarted","Data":"74e942012b888e37af5c6f2e38c6c9da891a7fe84b7ac134edc0468e33bc3ec3"} Feb 18 00:58:40 crc kubenswrapper[4791]: I0218 00:58:40.531043 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Feb 18 00:58:40 crc kubenswrapper[4791]: I0218 00:58:40.553969 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.553954154 podStartE2EDuration="2.553954154s" podCreationTimestamp="2026-02-18 00:58:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:58:40.549824216 +0000 UTC m=+1462.117837386" watchObservedRunningTime="2026-02-18 00:58:40.553954154 +0000 UTC m=+1462.121967324" Feb 18 00:58:42 crc kubenswrapper[4791]: I0218 00:58:42.554026 4791 generic.go:334] "Generic (PLEG): container finished" podID="8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5" containerID="c9d8f640beca5849fc7c2cf9f71df3cbf5fb52333a0f7be2d960aaa1a59d31bb" exitCode=0 Feb 18 00:58:42 crc kubenswrapper[4791]: I0218 00:58:42.554289 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5","Type":"ContainerDied","Data":"c9d8f640beca5849fc7c2cf9f71df3cbf5fb52333a0f7be2d960aaa1a59d31bb"} Feb 18 00:58:42 crc kubenswrapper[4791]: I0218 00:58:42.685634 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 18 00:58:42 crc kubenswrapper[4791]: I0218 00:58:42.760870 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-scripts\") pod \"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5\" (UID: \"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5\") " Feb 18 00:58:42 crc kubenswrapper[4791]: I0218 00:58:42.760931 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-config-data\") pod \"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5\" (UID: \"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5\") " Feb 18 00:58:42 crc kubenswrapper[4791]: I0218 00:58:42.760958 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-sg-core-conf-yaml\") pod \"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5\" (UID: \"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5\") " Feb 18 00:58:42 crc kubenswrapper[4791]: I0218 00:58:42.761042 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-79k75\" (UniqueName: \"kubernetes.io/projected/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-kube-api-access-79k75\") pod \"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5\" (UID: \"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5\") " Feb 18 00:58:42 crc kubenswrapper[4791]: I0218 00:58:42.761119 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-combined-ca-bundle\") pod \"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5\" (UID: \"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5\") " Feb 18 00:58:42 crc kubenswrapper[4791]: I0218 00:58:42.761180 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-run-httpd\") pod \"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5\" (UID: \"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5\") " Feb 18 00:58:42 crc kubenswrapper[4791]: I0218 00:58:42.761300 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-log-httpd\") pod \"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5\" (UID: \"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5\") " Feb 18 00:58:42 crc kubenswrapper[4791]: I0218 00:58:42.761796 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5" (UID: "8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:58:42 crc kubenswrapper[4791]: I0218 00:58:42.762000 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5" (UID: "8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:58:42 crc kubenswrapper[4791]: I0218 00:58:42.767747 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-scripts" (OuterVolumeSpecName: "scripts") pod "8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5" (UID: "8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:42 crc kubenswrapper[4791]: I0218 00:58:42.769354 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-kube-api-access-79k75" (OuterVolumeSpecName: "kube-api-access-79k75") pod "8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5" (UID: "8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5"). InnerVolumeSpecName "kube-api-access-79k75". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:58:42 crc kubenswrapper[4791]: I0218 00:58:42.784334 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Feb 18 00:58:42 crc kubenswrapper[4791]: I0218 00:58:42.784368 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Feb 18 00:58:42 crc kubenswrapper[4791]: I0218 00:58:42.819078 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5" (UID: "8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:42 crc kubenswrapper[4791]: I0218 00:58:42.822455 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Feb 18 00:58:42 crc kubenswrapper[4791]: I0218 00:58:42.833799 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Feb 18 00:58:42 crc kubenswrapper[4791]: I0218 00:58:42.863824 4791 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:42 crc kubenswrapper[4791]: I0218 00:58:42.863854 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-79k75\" (UniqueName: \"kubernetes.io/projected/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-kube-api-access-79k75\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:42 crc kubenswrapper[4791]: I0218 00:58:42.863864 4791 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-run-httpd\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:42 crc kubenswrapper[4791]: I0218 00:58:42.863875 4791 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-log-httpd\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:42 crc kubenswrapper[4791]: I0218 00:58:42.863885 4791 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:42 crc kubenswrapper[4791]: I0218 00:58:42.882719 4791 operation_generator.go:803] UnmountVolume.TearDown 
succeeded for volume "kubernetes.io/secret/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5" (UID: "8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:42 crc kubenswrapper[4791]: I0218 00:58:42.955243 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-config-data" (OuterVolumeSpecName: "config-data") pod "8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5" (UID: "8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:42 crc kubenswrapper[4791]: I0218 00:58:42.965382 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:42 crc kubenswrapper[4791]: I0218 00:58:42.965411 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.567576 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5","Type":"ContainerDied","Data":"b53aac7510481234569aea12434434d2d121760fea4778ee3a6e8564a5417515"} Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.567929 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.567945 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.567585 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.568952 4791 scope.go:117] "RemoveContainer" containerID="bb81ec1b470b4c22fbe2e64e7756de621c603a68fc5c78cb4e5e3381cd64ccd1" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.596047 4791 scope.go:117] "RemoveContainer" containerID="a98ed893453365edaef2dda995ba893eeabde69ea039e3e019401d84725917d9" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.599009 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.614304 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.639048 4791 scope.go:117] "RemoveContainer" containerID="352e25e8fc7002b58d67c51919b1d6f5f01d829369bdc85460aa0cacaca83d63" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.651890 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:58:43 crc kubenswrapper[4791]: E0218 00:58:43.652740 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5" containerName="ceilometer-central-agent" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.652759 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5" containerName="ceilometer-central-agent" Feb 18 00:58:43 crc kubenswrapper[4791]: E0218 00:58:43.652816 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5" containerName="proxy-httpd" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.652823 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5" containerName="proxy-httpd" Feb 18 00:58:43 crc kubenswrapper[4791]: E0218 00:58:43.652842 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5" containerName="sg-core" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.652848 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5" containerName="sg-core" Feb 18 00:58:43 crc kubenswrapper[4791]: E0218 00:58:43.652872 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5" containerName="ceilometer-notification-agent" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.652878 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5" containerName="ceilometer-notification-agent" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.653301 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5" containerName="proxy-httpd" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.653329 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5" containerName="ceilometer-notification-agent" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.653346 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5" containerName="ceilometer-central-agent" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.653586 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5" containerName="sg-core" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.658060 4791 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.660301 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.660411 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.679865 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4271166c-c69d-4f8c-bbe3-4c9702dd002b-log-httpd\") pod \"ceilometer-0\" (UID: \"4271166c-c69d-4f8c-bbe3-4c9702dd002b\") " pod="openstack/ceilometer-0" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.679959 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4271166c-c69d-4f8c-bbe3-4c9702dd002b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4271166c-c69d-4f8c-bbe3-4c9702dd002b\") " pod="openstack/ceilometer-0" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.680096 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4271166c-c69d-4f8c-bbe3-4c9702dd002b-config-data\") pod \"ceilometer-0\" (UID: \"4271166c-c69d-4f8c-bbe3-4c9702dd002b\") " pod="openstack/ceilometer-0" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.680266 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4271166c-c69d-4f8c-bbe3-4c9702dd002b-scripts\") pod \"ceilometer-0\" (UID: \"4271166c-c69d-4f8c-bbe3-4c9702dd002b\") " pod="openstack/ceilometer-0" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.680460 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8wr2p\" (UniqueName: \"kubernetes.io/projected/4271166c-c69d-4f8c-bbe3-4c9702dd002b-kube-api-access-8wr2p\") pod \"ceilometer-0\" (UID: \"4271166c-c69d-4f8c-bbe3-4c9702dd002b\") " pod="openstack/ceilometer-0" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.680502 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4271166c-c69d-4f8c-bbe3-4c9702dd002b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4271166c-c69d-4f8c-bbe3-4c9702dd002b\") " pod="openstack/ceilometer-0" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.680558 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4271166c-c69d-4f8c-bbe3-4c9702dd002b-run-httpd\") pod \"ceilometer-0\" (UID: \"4271166c-c69d-4f8c-bbe3-4c9702dd002b\") " pod="openstack/ceilometer-0" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.692890 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.696332 4791 scope.go:117] "RemoveContainer" containerID="c9d8f640beca5849fc7c2cf9f71df3cbf5fb52333a0f7be2d960aaa1a59d31bb" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.782893 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/4271166c-c69d-4f8c-bbe3-4c9702dd002b-scripts\") pod \"ceilometer-0\" (UID: \"4271166c-c69d-4f8c-bbe3-4c9702dd002b\") " pod="openstack/ceilometer-0" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.783024 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8wr2p\" (UniqueName: \"kubernetes.io/projected/4271166c-c69d-4f8c-bbe3-4c9702dd002b-kube-api-access-8wr2p\") pod \"ceilometer-0\" (UID: \"4271166c-c69d-4f8c-bbe3-4c9702dd002b\") " pod="openstack/ceilometer-0" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.783064 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4271166c-c69d-4f8c-bbe3-4c9702dd002b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4271166c-c69d-4f8c-bbe3-4c9702dd002b\") " pod="openstack/ceilometer-0" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.783101 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4271166c-c69d-4f8c-bbe3-4c9702dd002b-run-httpd\") pod \"ceilometer-0\" (UID: \"4271166c-c69d-4f8c-bbe3-4c9702dd002b\") " pod="openstack/ceilometer-0" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.783143 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4271166c-c69d-4f8c-bbe3-4c9702dd002b-log-httpd\") pod \"ceilometer-0\" (UID: \"4271166c-c69d-4f8c-bbe3-4c9702dd002b\") " pod="openstack/ceilometer-0" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.783193 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4271166c-c69d-4f8c-bbe3-4c9702dd002b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4271166c-c69d-4f8c-bbe3-4c9702dd002b\") " pod="openstack/ceilometer-0" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.783212 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4271166c-c69d-4f8c-bbe3-4c9702dd002b-config-data\") pod \"ceilometer-0\" (UID: \"4271166c-c69d-4f8c-bbe3-4c9702dd002b\") " pod="openstack/ceilometer-0" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.783866 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4271166c-c69d-4f8c-bbe3-4c9702dd002b-run-httpd\") pod \"ceilometer-0\" (UID: \"4271166c-c69d-4f8c-bbe3-4c9702dd002b\") " pod="openstack/ceilometer-0" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.783910 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4271166c-c69d-4f8c-bbe3-4c9702dd002b-log-httpd\") pod \"ceilometer-0\" (UID: \"4271166c-c69d-4f8c-bbe3-4c9702dd002b\") " pod="openstack/ceilometer-0" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.790125 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4271166c-c69d-4f8c-bbe3-4c9702dd002b-config-data\") pod \"ceilometer-0\" (UID: \"4271166c-c69d-4f8c-bbe3-4c9702dd002b\") " pod="openstack/ceilometer-0" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.796121 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/4271166c-c69d-4f8c-bbe3-4c9702dd002b-scripts\") pod \"ceilometer-0\" (UID: \"4271166c-c69d-4f8c-bbe3-4c9702dd002b\") " pod="openstack/ceilometer-0" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.798743 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4271166c-c69d-4f8c-bbe3-4c9702dd002b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4271166c-c69d-4f8c-bbe3-4c9702dd002b\") " pod="openstack/ceilometer-0" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.799281 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4271166c-c69d-4f8c-bbe3-4c9702dd002b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4271166c-c69d-4f8c-bbe3-4c9702dd002b\") " pod="openstack/ceilometer-0" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.800578 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8wr2p\" (UniqueName: \"kubernetes.io/projected/4271166c-c69d-4f8c-bbe3-4c9702dd002b-kube-api-access-8wr2p\") pod \"ceilometer-0\" (UID: \"4271166c-c69d-4f8c-bbe3-4c9702dd002b\") " pod="openstack/ceilometer-0" Feb 18 00:58:43 crc kubenswrapper[4791]: I0218 00:58:43.989621 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 18 00:58:44 crc kubenswrapper[4791]: I0218 00:58:44.324148 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:58:44 crc kubenswrapper[4791]: I0218 00:58:44.452581 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wvm6h" Feb 18 00:58:44 crc kubenswrapper[4791]: I0218 00:58:44.482405 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:58:44 crc kubenswrapper[4791]: W0218 00:58:44.489358 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4271166c_c69d_4f8c_bbe3_4c9702dd002b.slice/crio-a167d1bbcd10a5bba38a3b02cb66c2c3b43ef7b17a2c33b0cf80aed467fc1860 WatchSource:0}: Error finding container a167d1bbcd10a5bba38a3b02cb66c2c3b43ef7b17a2c33b0cf80aed467fc1860: Status 404 returned error can't find the container with id a167d1bbcd10a5bba38a3b02cb66c2c3b43ef7b17a2c33b0cf80aed467fc1860 Feb 18 00:58:44 crc kubenswrapper[4791]: I0218 00:58:44.500777 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wvm6h" Feb 18 00:58:44 crc kubenswrapper[4791]: I0218 00:58:44.579744 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4271166c-c69d-4f8c-bbe3-4c9702dd002b","Type":"ContainerStarted","Data":"a167d1bbcd10a5bba38a3b02cb66c2c3b43ef7b17a2c33b0cf80aed467fc1860"} Feb 18 00:58:44 crc kubenswrapper[4791]: I0218 00:58:44.907115 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Feb 18 00:58:44 crc kubenswrapper[4791]: I0218 00:58:44.907178 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Feb 18 00:58:44 crc kubenswrapper[4791]: I0218 00:58:44.945889 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Feb 18 00:58:44 crc kubenswrapper[4791]: I0218 00:58:44.967682 4791 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Feb 18 00:58:45 crc kubenswrapper[4791]: I0218 00:58:45.076795 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5" path="/var/lib/kubelet/pods/8f0ce0c2-e9d4-4d00-97bd-81e38fa324b5/volumes" Feb 18 00:58:45 crc kubenswrapper[4791]: I0218 00:58:45.135122 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wvm6h"] Feb 18 00:58:45 crc kubenswrapper[4791]: I0218 00:58:45.595309 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4271166c-c69d-4f8c-bbe3-4c9702dd002b","Type":"ContainerStarted","Data":"616cc2f71dded61314493713060dcc420d4fcafaba5707367e9516601c5f0936"} Feb 18 00:58:45 crc kubenswrapper[4791]: I0218 00:58:45.595511 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wvm6h" podUID="90ea96e9-ad1e-49ab-b9f9-b60d2142aacd" containerName="registry-server" containerID="cri-o://466ab734f4d998b0f43a831a6155aefa0bf20874599fa90eda8082b58833f730" gracePeriod=2 Feb 18 00:58:45 crc kubenswrapper[4791]: I0218 00:58:45.596332 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Feb 18 00:58:45 crc kubenswrapper[4791]: I0218 00:58:45.596516 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Feb 18 00:58:46 crc kubenswrapper[4791]: I0218 00:58:46.110030 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wvm6h" Feb 18 00:58:46 crc kubenswrapper[4791]: I0218 00:58:46.161460 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Feb 18 00:58:46 crc kubenswrapper[4791]: I0218 00:58:46.161574 4791 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Feb 18 00:58:46 crc kubenswrapper[4791]: I0218 00:58:46.163243 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Feb 18 00:58:46 crc kubenswrapper[4791]: I0218 00:58:46.239041 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90ea96e9-ad1e-49ab-b9f9-b60d2142aacd-utilities\") pod \"90ea96e9-ad1e-49ab-b9f9-b60d2142aacd\" (UID: \"90ea96e9-ad1e-49ab-b9f9-b60d2142aacd\") " Feb 18 00:58:46 crc kubenswrapper[4791]: I0218 00:58:46.239537 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90ea96e9-ad1e-49ab-b9f9-b60d2142aacd-catalog-content\") pod \"90ea96e9-ad1e-49ab-b9f9-b60d2142aacd\" (UID: \"90ea96e9-ad1e-49ab-b9f9-b60d2142aacd\") " Feb 18 00:58:46 crc kubenswrapper[4791]: I0218 00:58:46.239744 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ql95m\" (UniqueName: \"kubernetes.io/projected/90ea96e9-ad1e-49ab-b9f9-b60d2142aacd-kube-api-access-ql95m\") pod \"90ea96e9-ad1e-49ab-b9f9-b60d2142aacd\" (UID: \"90ea96e9-ad1e-49ab-b9f9-b60d2142aacd\") " Feb 18 00:58:46 crc kubenswrapper[4791]: I0218 00:58:46.239909 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/90ea96e9-ad1e-49ab-b9f9-b60d2142aacd-utilities" 
(OuterVolumeSpecName: "utilities") pod "90ea96e9-ad1e-49ab-b9f9-b60d2142aacd" (UID: "90ea96e9-ad1e-49ab-b9f9-b60d2142aacd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:58:46 crc kubenswrapper[4791]: I0218 00:58:46.240570 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90ea96e9-ad1e-49ab-b9f9-b60d2142aacd-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:46 crc kubenswrapper[4791]: I0218 00:58:46.262583 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90ea96e9-ad1e-49ab-b9f9-b60d2142aacd-kube-api-access-ql95m" (OuterVolumeSpecName: "kube-api-access-ql95m") pod "90ea96e9-ad1e-49ab-b9f9-b60d2142aacd" (UID: "90ea96e9-ad1e-49ab-b9f9-b60d2142aacd"). InnerVolumeSpecName "kube-api-access-ql95m". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:58:46 crc kubenswrapper[4791]: I0218 00:58:46.342975 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ql95m\" (UniqueName: \"kubernetes.io/projected/90ea96e9-ad1e-49ab-b9f9-b60d2142aacd-kube-api-access-ql95m\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:46 crc kubenswrapper[4791]: I0218 00:58:46.377892 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/90ea96e9-ad1e-49ab-b9f9-b60d2142aacd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "90ea96e9-ad1e-49ab-b9f9-b60d2142aacd" (UID: "90ea96e9-ad1e-49ab-b9f9-b60d2142aacd"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:58:46 crc kubenswrapper[4791]: I0218 00:58:46.445987 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90ea96e9-ad1e-49ab-b9f9-b60d2142aacd-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:46 crc kubenswrapper[4791]: I0218 00:58:46.608582 4791 generic.go:334] "Generic (PLEG): container finished" podID="90ea96e9-ad1e-49ab-b9f9-b60d2142aacd" containerID="466ab734f4d998b0f43a831a6155aefa0bf20874599fa90eda8082b58833f730" exitCode=0 Feb 18 00:58:46 crc kubenswrapper[4791]: I0218 00:58:46.608658 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wvm6h" Feb 18 00:58:46 crc kubenswrapper[4791]: I0218 00:58:46.608681 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wvm6h" event={"ID":"90ea96e9-ad1e-49ab-b9f9-b60d2142aacd","Type":"ContainerDied","Data":"466ab734f4d998b0f43a831a6155aefa0bf20874599fa90eda8082b58833f730"} Feb 18 00:58:46 crc kubenswrapper[4791]: I0218 00:58:46.608744 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wvm6h" event={"ID":"90ea96e9-ad1e-49ab-b9f9-b60d2142aacd","Type":"ContainerDied","Data":"5fa8e1eb4cd5eb6b473bec7b29777c9dfd6032a45f03551fe3c346dbb1164a12"} Feb 18 00:58:46 crc kubenswrapper[4791]: I0218 00:58:46.608768 4791 scope.go:117] "RemoveContainer" containerID="466ab734f4d998b0f43a831a6155aefa0bf20874599fa90eda8082b58833f730" Feb 18 00:58:46 crc kubenswrapper[4791]: I0218 00:58:46.612053 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4271166c-c69d-4f8c-bbe3-4c9702dd002b","Type":"ContainerStarted","Data":"dbf740840c52a3aa8e2b641755d45deef0e95aceea4f1ed9993977053ec1e310"} Feb 18 00:58:46 crc kubenswrapper[4791]: I0218 00:58:46.612090 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4271166c-c69d-4f8c-bbe3-4c9702dd002b","Type":"ContainerStarted","Data":"2ed89c50025808c68f9c8d4f15f3c4882799a85e1ce3c319c88e04bda2117671"} Feb 18 00:58:46 crc kubenswrapper[4791]: I0218 00:58:46.647325 4791 scope.go:117] "RemoveContainer" containerID="9b36e7871e18635dbf919fd0df751c56aad9eb498076dfeaf197784201ea673b" Feb 18 00:58:46 crc kubenswrapper[4791]: I0218 00:58:46.719523 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wvm6h"] Feb 18 00:58:46 crc kubenswrapper[4791]: I0218 00:58:46.719775 4791 scope.go:117] "RemoveContainer" containerID="91059b958d53a70079f641fbb5feaf69ac224ef29059998f61aa3495170b687f" Feb 18 00:58:46 crc kubenswrapper[4791]: I0218 00:58:46.724998 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wvm6h"] Feb 18 00:58:46 crc kubenswrapper[4791]: I0218 00:58:46.759717 4791 scope.go:117] "RemoveContainer" containerID="466ab734f4d998b0f43a831a6155aefa0bf20874599fa90eda8082b58833f730" Feb 18 00:58:46 crc kubenswrapper[4791]: E0218 00:58:46.760205 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"466ab734f4d998b0f43a831a6155aefa0bf20874599fa90eda8082b58833f730\": container with ID starting with 466ab734f4d998b0f43a831a6155aefa0bf20874599fa90eda8082b58833f730 not found: ID does not exist" containerID="466ab734f4d998b0f43a831a6155aefa0bf20874599fa90eda8082b58833f730" Feb 18 00:58:46 crc kubenswrapper[4791]: I0218 00:58:46.760238 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"466ab734f4d998b0f43a831a6155aefa0bf20874599fa90eda8082b58833f730"} err="failed to get container status \"466ab734f4d998b0f43a831a6155aefa0bf20874599fa90eda8082b58833f730\": rpc error: code = NotFound desc = could not find container \"466ab734f4d998b0f43a831a6155aefa0bf20874599fa90eda8082b58833f730\": container with ID starting with 466ab734f4d998b0f43a831a6155aefa0bf20874599fa90eda8082b58833f730 not found: ID does not exist" Feb 18 00:58:46 crc kubenswrapper[4791]: I0218 00:58:46.760263 4791 scope.go:117] "RemoveContainer" 
containerID="9b36e7871e18635dbf919fd0df751c56aad9eb498076dfeaf197784201ea673b" Feb 18 00:58:46 crc kubenswrapper[4791]: E0218 00:58:46.760632 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9b36e7871e18635dbf919fd0df751c56aad9eb498076dfeaf197784201ea673b\": container with ID starting with 9b36e7871e18635dbf919fd0df751c56aad9eb498076dfeaf197784201ea673b not found: ID does not exist" containerID="9b36e7871e18635dbf919fd0df751c56aad9eb498076dfeaf197784201ea673b" Feb 18 00:58:46 crc kubenswrapper[4791]: I0218 00:58:46.760657 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b36e7871e18635dbf919fd0df751c56aad9eb498076dfeaf197784201ea673b"} err="failed to get container status \"9b36e7871e18635dbf919fd0df751c56aad9eb498076dfeaf197784201ea673b\": rpc error: code = NotFound desc = could not find container \"9b36e7871e18635dbf919fd0df751c56aad9eb498076dfeaf197784201ea673b\": container with ID starting with 9b36e7871e18635dbf919fd0df751c56aad9eb498076dfeaf197784201ea673b not found: ID does not exist" Feb 18 00:58:46 crc kubenswrapper[4791]: I0218 00:58:46.760673 4791 scope.go:117] "RemoveContainer" containerID="91059b958d53a70079f641fbb5feaf69ac224ef29059998f61aa3495170b687f" Feb 18 00:58:46 crc kubenswrapper[4791]: E0218 00:58:46.760937 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"91059b958d53a70079f641fbb5feaf69ac224ef29059998f61aa3495170b687f\": container with ID starting with 91059b958d53a70079f641fbb5feaf69ac224ef29059998f61aa3495170b687f not found: ID does not exist" containerID="91059b958d53a70079f641fbb5feaf69ac224ef29059998f61aa3495170b687f" Feb 18 00:58:46 crc kubenswrapper[4791]: I0218 00:58:46.760975 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91059b958d53a70079f641fbb5feaf69ac224ef29059998f61aa3495170b687f"} err="failed to get container status \"91059b958d53a70079f641fbb5feaf69ac224ef29059998f61aa3495170b687f\": rpc error: code = NotFound desc = could not find container \"91059b958d53a70079f641fbb5feaf69ac224ef29059998f61aa3495170b687f\": container with ID starting with 91059b958d53a70079f641fbb5feaf69ac224ef29059998f61aa3495170b687f not found: ID does not exist" Feb 18 00:58:47 crc kubenswrapper[4791]: I0218 00:58:47.073029 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="90ea96e9-ad1e-49ab-b9f9-b60d2142aacd" path="/var/lib/kubelet/pods/90ea96e9-ad1e-49ab-b9f9-b60d2142aacd/volumes" Feb 18 00:58:47 crc kubenswrapper[4791]: I0218 00:58:47.693343 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Feb 18 00:58:47 crc kubenswrapper[4791]: I0218 00:58:47.693622 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Feb 18 00:58:48 crc kubenswrapper[4791]: I0218 00:58:48.654643 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4271166c-c69d-4f8c-bbe3-4c9702dd002b","Type":"ContainerStarted","Data":"b171524cf7f4eda12b966b39bd2026fd989e8202eec0bd7523a2e53f9f55a037"} Feb 18 00:58:48 crc kubenswrapper[4791]: I0218 00:58:48.654943 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4271166c-c69d-4f8c-bbe3-4c9702dd002b" containerName="ceilometer-central-agent" 
containerID="cri-o://616cc2f71dded61314493713060dcc420d4fcafaba5707367e9516601c5f0936" gracePeriod=30 Feb 18 00:58:48 crc kubenswrapper[4791]: I0218 00:58:48.655351 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Feb 18 00:58:48 crc kubenswrapper[4791]: I0218 00:58:48.655624 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4271166c-c69d-4f8c-bbe3-4c9702dd002b" containerName="proxy-httpd" containerID="cri-o://b171524cf7f4eda12b966b39bd2026fd989e8202eec0bd7523a2e53f9f55a037" gracePeriod=30 Feb 18 00:58:48 crc kubenswrapper[4791]: I0218 00:58:48.655757 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4271166c-c69d-4f8c-bbe3-4c9702dd002b" containerName="ceilometer-notification-agent" containerID="cri-o://2ed89c50025808c68f9c8d4f15f3c4882799a85e1ce3c319c88e04bda2117671" gracePeriod=30 Feb 18 00:58:48 crc kubenswrapper[4791]: I0218 00:58:48.655820 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4271166c-c69d-4f8c-bbe3-4c9702dd002b" containerName="sg-core" containerID="cri-o://dbf740840c52a3aa8e2b641755d45deef0e95aceea4f1ed9993977053ec1e310" gracePeriod=30 Feb 18 00:58:48 crc kubenswrapper[4791]: I0218 00:58:48.688641 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.4694404260000002 podStartE2EDuration="5.688619073s" podCreationTimestamp="2026-02-18 00:58:43 +0000 UTC" firstStartedPulling="2026-02-18 00:58:44.493689806 +0000 UTC m=+1466.061702986" lastFinishedPulling="2026-02-18 00:58:47.712868463 +0000 UTC m=+1469.280881633" observedRunningTime="2026-02-18 00:58:48.677444757 +0000 UTC m=+1470.245457937" watchObservedRunningTime="2026-02-18 00:58:48.688619073 +0000 UTC m=+1470.256632243" Feb 18 00:58:48 crc kubenswrapper[4791]: I0218 00:58:48.917869 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.411630 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-qrrmp"] Feb 18 00:58:49 crc kubenswrapper[4791]: E0218 00:58:49.412433 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90ea96e9-ad1e-49ab-b9f9-b60d2142aacd" containerName="registry-server" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.412451 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="90ea96e9-ad1e-49ab-b9f9-b60d2142aacd" containerName="registry-server" Feb 18 00:58:49 crc kubenswrapper[4791]: E0218 00:58:49.412465 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90ea96e9-ad1e-49ab-b9f9-b60d2142aacd" containerName="extract-utilities" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.412472 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="90ea96e9-ad1e-49ab-b9f9-b60d2142aacd" containerName="extract-utilities" Feb 18 00:58:49 crc kubenswrapper[4791]: E0218 00:58:49.412497 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90ea96e9-ad1e-49ab-b9f9-b60d2142aacd" containerName="extract-content" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.412504 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="90ea96e9-ad1e-49ab-b9f9-b60d2142aacd" containerName="extract-content" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.412699 4791 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="90ea96e9-ad1e-49ab-b9f9-b60d2142aacd" containerName="registry-server" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.421021 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-qrrmp" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.423653 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.424084 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.427017 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-qrrmp"] Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.510977 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d90b881e-3f26-4065-addf-b131f132fef9-scripts\") pod \"nova-cell0-cell-mapping-qrrmp\" (UID: \"d90b881e-3f26-4065-addf-b131f132fef9\") " pod="openstack/nova-cell0-cell-mapping-qrrmp" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.511036 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d90b881e-3f26-4065-addf-b131f132fef9-config-data\") pod \"nova-cell0-cell-mapping-qrrmp\" (UID: \"d90b881e-3f26-4065-addf-b131f132fef9\") " pod="openstack/nova-cell0-cell-mapping-qrrmp" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.511194 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-77qk9\" (UniqueName: \"kubernetes.io/projected/d90b881e-3f26-4065-addf-b131f132fef9-kube-api-access-77qk9\") pod \"nova-cell0-cell-mapping-qrrmp\" (UID: \"d90b881e-3f26-4065-addf-b131f132fef9\") " pod="openstack/nova-cell0-cell-mapping-qrrmp" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.511523 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d90b881e-3f26-4065-addf-b131f132fef9-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-qrrmp\" (UID: \"d90b881e-3f26-4065-addf-b131f132fef9\") " pod="openstack/nova-cell0-cell-mapping-qrrmp" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.566542 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.568504 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.573706 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.583892 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.612991 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d90b881e-3f26-4065-addf-b131f132fef9-scripts\") pod \"nova-cell0-cell-mapping-qrrmp\" (UID: \"d90b881e-3f26-4065-addf-b131f132fef9\") " pod="openstack/nova-cell0-cell-mapping-qrrmp" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.613049 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d90b881e-3f26-4065-addf-b131f132fef9-config-data\") pod \"nova-cell0-cell-mapping-qrrmp\" (UID: \"d90b881e-3f26-4065-addf-b131f132fef9\") " pod="openstack/nova-cell0-cell-mapping-qrrmp" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.613105 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-77qk9\" (UniqueName: \"kubernetes.io/projected/d90b881e-3f26-4065-addf-b131f132fef9-kube-api-access-77qk9\") pod \"nova-cell0-cell-mapping-qrrmp\" (UID: \"d90b881e-3f26-4065-addf-b131f132fef9\") " pod="openstack/nova-cell0-cell-mapping-qrrmp" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.613219 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d90b881e-3f26-4065-addf-b131f132fef9-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-qrrmp\" (UID: \"d90b881e-3f26-4065-addf-b131f132fef9\") " pod="openstack/nova-cell0-cell-mapping-qrrmp" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.622259 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d90b881e-3f26-4065-addf-b131f132fef9-scripts\") pod \"nova-cell0-cell-mapping-qrrmp\" (UID: \"d90b881e-3f26-4065-addf-b131f132fef9\") " pod="openstack/nova-cell0-cell-mapping-qrrmp" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.642285 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d90b881e-3f26-4065-addf-b131f132fef9-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-qrrmp\" (UID: \"d90b881e-3f26-4065-addf-b131f132fef9\") " pod="openstack/nova-cell0-cell-mapping-qrrmp" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.663060 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-77qk9\" (UniqueName: \"kubernetes.io/projected/d90b881e-3f26-4065-addf-b131f132fef9-kube-api-access-77qk9\") pod \"nova-cell0-cell-mapping-qrrmp\" (UID: \"d90b881e-3f26-4065-addf-b131f132fef9\") " pod="openstack/nova-cell0-cell-mapping-qrrmp" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.663919 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d90b881e-3f26-4065-addf-b131f132fef9-config-data\") pod \"nova-cell0-cell-mapping-qrrmp\" (UID: \"d90b881e-3f26-4065-addf-b131f132fef9\") " pod="openstack/nova-cell0-cell-mapping-qrrmp" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.716280 4791 
generic.go:334] "Generic (PLEG): container finished" podID="4271166c-c69d-4f8c-bbe3-4c9702dd002b" containerID="b171524cf7f4eda12b966b39bd2026fd989e8202eec0bd7523a2e53f9f55a037" exitCode=0 Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.716314 4791 generic.go:334] "Generic (PLEG): container finished" podID="4271166c-c69d-4f8c-bbe3-4c9702dd002b" containerID="dbf740840c52a3aa8e2b641755d45deef0e95aceea4f1ed9993977053ec1e310" exitCode=2 Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.716321 4791 generic.go:334] "Generic (PLEG): container finished" podID="4271166c-c69d-4f8c-bbe3-4c9702dd002b" containerID="2ed89c50025808c68f9c8d4f15f3c4882799a85e1ce3c319c88e04bda2117671" exitCode=0 Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.716339 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4271166c-c69d-4f8c-bbe3-4c9702dd002b","Type":"ContainerDied","Data":"b171524cf7f4eda12b966b39bd2026fd989e8202eec0bd7523a2e53f9f55a037"} Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.716364 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4271166c-c69d-4f8c-bbe3-4c9702dd002b","Type":"ContainerDied","Data":"dbf740840c52a3aa8e2b641755d45deef0e95aceea4f1ed9993977053ec1e310"} Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.716373 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4271166c-c69d-4f8c-bbe3-4c9702dd002b","Type":"ContainerDied","Data":"2ed89c50025808c68f9c8d4f15f3c4882799a85e1ce3c319c88e04bda2117671"} Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.717691 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b560aa26-6a1f-4695-aeaa-b1a80b0765ec-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"b560aa26-6a1f-4695-aeaa-b1a80b0765ec\") " pod="openstack/nova-cell1-novncproxy-0" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.717753 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xv8lj\" (UniqueName: \"kubernetes.io/projected/b560aa26-6a1f-4695-aeaa-b1a80b0765ec-kube-api-access-xv8lj\") pod \"nova-cell1-novncproxy-0\" (UID: \"b560aa26-6a1f-4695-aeaa-b1a80b0765ec\") " pod="openstack/nova-cell1-novncproxy-0" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.717794 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b560aa26-6a1f-4695-aeaa-b1a80b0765ec-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"b560aa26-6a1f-4695-aeaa-b1a80b0765ec\") " pod="openstack/nova-cell1-novncproxy-0" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.746195 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.748433 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.754831 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.772232 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.774720 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-qrrmp" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.819490 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b560aa26-6a1f-4695-aeaa-b1a80b0765ec-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"b560aa26-6a1f-4695-aeaa-b1a80b0765ec\") " pod="openstack/nova-cell1-novncproxy-0" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.819559 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wjwmc\" (UniqueName: \"kubernetes.io/projected/c2b1b43a-a26e-4c38-8efa-055269e9e34f-kube-api-access-wjwmc\") pod \"nova-api-0\" (UID: \"c2b1b43a-a26e-4c38-8efa-055269e9e34f\") " pod="openstack/nova-api-0" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.819590 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xv8lj\" (UniqueName: \"kubernetes.io/projected/b560aa26-6a1f-4695-aeaa-b1a80b0765ec-kube-api-access-xv8lj\") pod \"nova-cell1-novncproxy-0\" (UID: \"b560aa26-6a1f-4695-aeaa-b1a80b0765ec\") " pod="openstack/nova-cell1-novncproxy-0" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.819608 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2b1b43a-a26e-4c38-8efa-055269e9e34f-config-data\") pod \"nova-api-0\" (UID: \"c2b1b43a-a26e-4c38-8efa-055269e9e34f\") " pod="openstack/nova-api-0" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.819635 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b560aa26-6a1f-4695-aeaa-b1a80b0765ec-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"b560aa26-6a1f-4695-aeaa-b1a80b0765ec\") " pod="openstack/nova-cell1-novncproxy-0" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.819697 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2b1b43a-a26e-4c38-8efa-055269e9e34f-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c2b1b43a-a26e-4c38-8efa-055269e9e34f\") " pod="openstack/nova-api-0" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.819717 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2b1b43a-a26e-4c38-8efa-055269e9e34f-logs\") pod \"nova-api-0\" (UID: \"c2b1b43a-a26e-4c38-8efa-055269e9e34f\") " pod="openstack/nova-api-0" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.825697 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b560aa26-6a1f-4695-aeaa-b1a80b0765ec-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"b560aa26-6a1f-4695-aeaa-b1a80b0765ec\") " pod="openstack/nova-cell1-novncproxy-0" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.829855 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b560aa26-6a1f-4695-aeaa-b1a80b0765ec-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"b560aa26-6a1f-4695-aeaa-b1a80b0765ec\") " pod="openstack/nova-cell1-novncproxy-0" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.836253 4791 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/nova-metadata-0"] Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.863374 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.868083 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.871467 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.872297 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xv8lj\" (UniqueName: \"kubernetes.io/projected/b560aa26-6a1f-4695-aeaa-b1a80b0765ec-kube-api-access-xv8lj\") pod \"nova-cell1-novncproxy-0\" (UID: \"b560aa26-6a1f-4695-aeaa-b1a80b0765ec\") " pod="openstack/nova-cell1-novncproxy-0" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.894272 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.921476 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13f78453-b30b-4f2c-bec0-22c7e601d5c7-config-data\") pod \"nova-metadata-0\" (UID: \"13f78453-b30b-4f2c-bec0-22c7e601d5c7\") " pod="openstack/nova-metadata-0" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.921518 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13f78453-b30b-4f2c-bec0-22c7e601d5c7-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"13f78453-b30b-4f2c-bec0-22c7e601d5c7\") " pod="openstack/nova-metadata-0" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.921554 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9cvb8\" (UniqueName: \"kubernetes.io/projected/13f78453-b30b-4f2c-bec0-22c7e601d5c7-kube-api-access-9cvb8\") pod \"nova-metadata-0\" (UID: \"13f78453-b30b-4f2c-bec0-22c7e601d5c7\") " pod="openstack/nova-metadata-0" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.921634 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wjwmc\" (UniqueName: \"kubernetes.io/projected/c2b1b43a-a26e-4c38-8efa-055269e9e34f-kube-api-access-wjwmc\") pod \"nova-api-0\" (UID: \"c2b1b43a-a26e-4c38-8efa-055269e9e34f\") " pod="openstack/nova-api-0" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.921655 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/13f78453-b30b-4f2c-bec0-22c7e601d5c7-logs\") pod \"nova-metadata-0\" (UID: \"13f78453-b30b-4f2c-bec0-22c7e601d5c7\") " pod="openstack/nova-metadata-0" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.921679 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2b1b43a-a26e-4c38-8efa-055269e9e34f-config-data\") pod \"nova-api-0\" (UID: \"c2b1b43a-a26e-4c38-8efa-055269e9e34f\") " pod="openstack/nova-api-0" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.921771 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/c2b1b43a-a26e-4c38-8efa-055269e9e34f-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c2b1b43a-a26e-4c38-8efa-055269e9e34f\") " pod="openstack/nova-api-0" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.921794 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2b1b43a-a26e-4c38-8efa-055269e9e34f-logs\") pod \"nova-api-0\" (UID: \"c2b1b43a-a26e-4c38-8efa-055269e9e34f\") " pod="openstack/nova-api-0" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.927543 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2b1b43a-a26e-4c38-8efa-055269e9e34f-logs\") pod \"nova-api-0\" (UID: \"c2b1b43a-a26e-4c38-8efa-055269e9e34f\") " pod="openstack/nova-api-0" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.930901 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2b1b43a-a26e-4c38-8efa-055269e9e34f-config-data\") pod \"nova-api-0\" (UID: \"c2b1b43a-a26e-4c38-8efa-055269e9e34f\") " pod="openstack/nova-api-0" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.933966 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.935540 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.948975 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.951785 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wjwmc\" (UniqueName: \"kubernetes.io/projected/c2b1b43a-a26e-4c38-8efa-055269e9e34f-kube-api-access-wjwmc\") pod \"nova-api-0\" (UID: \"c2b1b43a-a26e-4c38-8efa-055269e9e34f\") " pod="openstack/nova-api-0" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.956362 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2b1b43a-a26e-4c38-8efa-055269e9e34f-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c2b1b43a-a26e-4c38-8efa-055269e9e34f\") " pod="openstack/nova-api-0" Feb 18 00:58:49 crc kubenswrapper[4791]: I0218 00:58:49.964003 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.005602 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-qnm85"] Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.007789 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5fbc4d444f-qnm85" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.023675 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bace8397-f744-4d2a-bd15-e32f27ef9af7-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"bace8397-f744-4d2a-bd15-e32f27ef9af7\") " pod="openstack/nova-scheduler-0" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.023730 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/13f78453-b30b-4f2c-bec0-22c7e601d5c7-logs\") pod \"nova-metadata-0\" (UID: \"13f78453-b30b-4f2c-bec0-22c7e601d5c7\") " pod="openstack/nova-metadata-0" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.023870 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f45vl\" (UniqueName: \"kubernetes.io/projected/bace8397-f744-4d2a-bd15-e32f27ef9af7-kube-api-access-f45vl\") pod \"nova-scheduler-0\" (UID: \"bace8397-f744-4d2a-bd15-e32f27ef9af7\") " pod="openstack/nova-scheduler-0" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.023907 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13f78453-b30b-4f2c-bec0-22c7e601d5c7-config-data\") pod \"nova-metadata-0\" (UID: \"13f78453-b30b-4f2c-bec0-22c7e601d5c7\") " pod="openstack/nova-metadata-0" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.023924 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13f78453-b30b-4f2c-bec0-22c7e601d5c7-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"13f78453-b30b-4f2c-bec0-22c7e601d5c7\") " pod="openstack/nova-metadata-0" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.023947 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bace8397-f744-4d2a-bd15-e32f27ef9af7-config-data\") pod \"nova-scheduler-0\" (UID: \"bace8397-f744-4d2a-bd15-e32f27ef9af7\") " pod="openstack/nova-scheduler-0" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.023970 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9cvb8\" (UniqueName: \"kubernetes.io/projected/13f78453-b30b-4f2c-bec0-22c7e601d5c7-kube-api-access-9cvb8\") pod \"nova-metadata-0\" (UID: \"13f78453-b30b-4f2c-bec0-22c7e601d5c7\") " pod="openstack/nova-metadata-0" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.025500 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/13f78453-b30b-4f2c-bec0-22c7e601d5c7-logs\") pod \"nova-metadata-0\" (UID: \"13f78453-b30b-4f2c-bec0-22c7e601d5c7\") " pod="openstack/nova-metadata-0" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.034893 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13f78453-b30b-4f2c-bec0-22c7e601d5c7-config-data\") pod \"nova-metadata-0\" (UID: \"13f78453-b30b-4f2c-bec0-22c7e601d5c7\") " pod="openstack/nova-metadata-0" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.035397 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/13f78453-b30b-4f2c-bec0-22c7e601d5c7-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"13f78453-b30b-4f2c-bec0-22c7e601d5c7\") " pod="openstack/nova-metadata-0" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.044415 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-qnm85"] Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.062948 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9cvb8\" (UniqueName: \"kubernetes.io/projected/13f78453-b30b-4f2c-bec0-22c7e601d5c7-kube-api-access-9cvb8\") pod \"nova-metadata-0\" (UID: \"13f78453-b30b-4f2c-bec0-22c7e601d5c7\") " pod="openstack/nova-metadata-0" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.098182 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.107127 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.129989 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/78f72e20-5be7-448d-b18c-390ad193f0ea-dns-svc\") pod \"dnsmasq-dns-5fbc4d444f-qnm85\" (UID: \"78f72e20-5be7-448d-b18c-390ad193f0ea\") " pod="openstack/dnsmasq-dns-5fbc4d444f-qnm85" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.130030 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/78f72e20-5be7-448d-b18c-390ad193f0ea-config\") pod \"dnsmasq-dns-5fbc4d444f-qnm85\" (UID: \"78f72e20-5be7-448d-b18c-390ad193f0ea\") " pod="openstack/dnsmasq-dns-5fbc4d444f-qnm85" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.130084 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bace8397-f744-4d2a-bd15-e32f27ef9af7-config-data\") pod \"nova-scheduler-0\" (UID: \"bace8397-f744-4d2a-bd15-e32f27ef9af7\") " pod="openstack/nova-scheduler-0" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.130170 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/78f72e20-5be7-448d-b18c-390ad193f0ea-ovsdbserver-nb\") pod \"dnsmasq-dns-5fbc4d444f-qnm85\" (UID: \"78f72e20-5be7-448d-b18c-390ad193f0ea\") " pod="openstack/dnsmasq-dns-5fbc4d444f-qnm85" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.130196 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/78f72e20-5be7-448d-b18c-390ad193f0ea-dns-swift-storage-0\") pod \"dnsmasq-dns-5fbc4d444f-qnm85\" (UID: \"78f72e20-5be7-448d-b18c-390ad193f0ea\") " pod="openstack/dnsmasq-dns-5fbc4d444f-qnm85" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.130224 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-knbqq\" (UniqueName: \"kubernetes.io/projected/78f72e20-5be7-448d-b18c-390ad193f0ea-kube-api-access-knbqq\") pod \"dnsmasq-dns-5fbc4d444f-qnm85\" (UID: \"78f72e20-5be7-448d-b18c-390ad193f0ea\") " pod="openstack/dnsmasq-dns-5fbc4d444f-qnm85" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.130249 4791 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bace8397-f744-4d2a-bd15-e32f27ef9af7-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"bace8397-f744-4d2a-bd15-e32f27ef9af7\") " pod="openstack/nova-scheduler-0" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.130382 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/78f72e20-5be7-448d-b18c-390ad193f0ea-ovsdbserver-sb\") pod \"dnsmasq-dns-5fbc4d444f-qnm85\" (UID: \"78f72e20-5be7-448d-b18c-390ad193f0ea\") " pod="openstack/dnsmasq-dns-5fbc4d444f-qnm85" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.130432 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f45vl\" (UniqueName: \"kubernetes.io/projected/bace8397-f744-4d2a-bd15-e32f27ef9af7-kube-api-access-f45vl\") pod \"nova-scheduler-0\" (UID: \"bace8397-f744-4d2a-bd15-e32f27ef9af7\") " pod="openstack/nova-scheduler-0" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.135101 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bace8397-f744-4d2a-bd15-e32f27ef9af7-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"bace8397-f744-4d2a-bd15-e32f27ef9af7\") " pod="openstack/nova-scheduler-0" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.146516 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bace8397-f744-4d2a-bd15-e32f27ef9af7-config-data\") pod \"nova-scheduler-0\" (UID: \"bace8397-f744-4d2a-bd15-e32f27ef9af7\") " pod="openstack/nova-scheduler-0" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.150477 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f45vl\" (UniqueName: \"kubernetes.io/projected/bace8397-f744-4d2a-bd15-e32f27ef9af7-kube-api-access-f45vl\") pod \"nova-scheduler-0\" (UID: \"bace8397-f744-4d2a-bd15-e32f27ef9af7\") " pod="openstack/nova-scheduler-0" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.233194 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/78f72e20-5be7-448d-b18c-390ad193f0ea-ovsdbserver-sb\") pod \"dnsmasq-dns-5fbc4d444f-qnm85\" (UID: \"78f72e20-5be7-448d-b18c-390ad193f0ea\") " pod="openstack/dnsmasq-dns-5fbc4d444f-qnm85" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.233274 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/78f72e20-5be7-448d-b18c-390ad193f0ea-dns-svc\") pod \"dnsmasq-dns-5fbc4d444f-qnm85\" (UID: \"78f72e20-5be7-448d-b18c-390ad193f0ea\") " pod="openstack/dnsmasq-dns-5fbc4d444f-qnm85" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.233297 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/78f72e20-5be7-448d-b18c-390ad193f0ea-config\") pod \"dnsmasq-dns-5fbc4d444f-qnm85\" (UID: \"78f72e20-5be7-448d-b18c-390ad193f0ea\") " pod="openstack/dnsmasq-dns-5fbc4d444f-qnm85" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.233390 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/78f72e20-5be7-448d-b18c-390ad193f0ea-ovsdbserver-nb\") pod 
\"dnsmasq-dns-5fbc4d444f-qnm85\" (UID: \"78f72e20-5be7-448d-b18c-390ad193f0ea\") " pod="openstack/dnsmasq-dns-5fbc4d444f-qnm85" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.233414 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/78f72e20-5be7-448d-b18c-390ad193f0ea-dns-swift-storage-0\") pod \"dnsmasq-dns-5fbc4d444f-qnm85\" (UID: \"78f72e20-5be7-448d-b18c-390ad193f0ea\") " pod="openstack/dnsmasq-dns-5fbc4d444f-qnm85" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.233440 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-knbqq\" (UniqueName: \"kubernetes.io/projected/78f72e20-5be7-448d-b18c-390ad193f0ea-kube-api-access-knbqq\") pod \"dnsmasq-dns-5fbc4d444f-qnm85\" (UID: \"78f72e20-5be7-448d-b18c-390ad193f0ea\") " pod="openstack/dnsmasq-dns-5fbc4d444f-qnm85" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.234887 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/78f72e20-5be7-448d-b18c-390ad193f0ea-dns-swift-storage-0\") pod \"dnsmasq-dns-5fbc4d444f-qnm85\" (UID: \"78f72e20-5be7-448d-b18c-390ad193f0ea\") " pod="openstack/dnsmasq-dns-5fbc4d444f-qnm85" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.235118 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/78f72e20-5be7-448d-b18c-390ad193f0ea-ovsdbserver-sb\") pod \"dnsmasq-dns-5fbc4d444f-qnm85\" (UID: \"78f72e20-5be7-448d-b18c-390ad193f0ea\") " pod="openstack/dnsmasq-dns-5fbc4d444f-qnm85" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.234892 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/78f72e20-5be7-448d-b18c-390ad193f0ea-ovsdbserver-nb\") pod \"dnsmasq-dns-5fbc4d444f-qnm85\" (UID: \"78f72e20-5be7-448d-b18c-390ad193f0ea\") " pod="openstack/dnsmasq-dns-5fbc4d444f-qnm85" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.235719 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/78f72e20-5be7-448d-b18c-390ad193f0ea-dns-svc\") pod \"dnsmasq-dns-5fbc4d444f-qnm85\" (UID: \"78f72e20-5be7-448d-b18c-390ad193f0ea\") " pod="openstack/dnsmasq-dns-5fbc4d444f-qnm85" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.236041 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/78f72e20-5be7-448d-b18c-390ad193f0ea-config\") pod \"dnsmasq-dns-5fbc4d444f-qnm85\" (UID: \"78f72e20-5be7-448d-b18c-390ad193f0ea\") " pod="openstack/dnsmasq-dns-5fbc4d444f-qnm85" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.258634 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-knbqq\" (UniqueName: \"kubernetes.io/projected/78f72e20-5be7-448d-b18c-390ad193f0ea-kube-api-access-knbqq\") pod \"dnsmasq-dns-5fbc4d444f-qnm85\" (UID: \"78f72e20-5be7-448d-b18c-390ad193f0ea\") " pod="openstack/dnsmasq-dns-5fbc4d444f-qnm85" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.429763 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.441265 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5fbc4d444f-qnm85" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.499046 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.515481 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-qrrmp"] Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.748284 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"b560aa26-6a1f-4695-aeaa-b1a80b0765ec","Type":"ContainerStarted","Data":"e8ca1782d68f08bda5a42ed45ff261a6db532c448cd1a3cca23fc9454e73a2de"} Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.767888 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-qrrmp" event={"ID":"d90b881e-3f26-4065-addf-b131f132fef9","Type":"ContainerStarted","Data":"8b7b5c6a03e2de3b1bd807690e8a7bab8279b28268f57e23ff2fb43ba588ae9f"} Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.986991 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-n8x6z"] Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.989226 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-n8x6z" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.992497 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Feb 18 00:58:50 crc kubenswrapper[4791]: I0218 00:58:50.998755 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Feb 18 00:58:51 crc kubenswrapper[4791]: I0218 00:58:51.007457 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-n8x6z"] Feb 18 00:58:51 crc kubenswrapper[4791]: I0218 00:58:51.052563 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8aa052d2-11d5-4497-86e2-23c4e1f72f98-scripts\") pod \"nova-cell1-conductor-db-sync-n8x6z\" (UID: \"8aa052d2-11d5-4497-86e2-23c4e1f72f98\") " pod="openstack/nova-cell1-conductor-db-sync-n8x6z" Feb 18 00:58:51 crc kubenswrapper[4791]: I0218 00:58:51.052631 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8aa052d2-11d5-4497-86e2-23c4e1f72f98-config-data\") pod \"nova-cell1-conductor-db-sync-n8x6z\" (UID: \"8aa052d2-11d5-4497-86e2-23c4e1f72f98\") " pod="openstack/nova-cell1-conductor-db-sync-n8x6z" Feb 18 00:58:51 crc kubenswrapper[4791]: I0218 00:58:51.052669 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8aa052d2-11d5-4497-86e2-23c4e1f72f98-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-n8x6z\" (UID: \"8aa052d2-11d5-4497-86e2-23c4e1f72f98\") " pod="openstack/nova-cell1-conductor-db-sync-n8x6z" Feb 18 00:58:51 crc kubenswrapper[4791]: I0218 00:58:51.052759 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4s94s\" (UniqueName: \"kubernetes.io/projected/8aa052d2-11d5-4497-86e2-23c4e1f72f98-kube-api-access-4s94s\") pod \"nova-cell1-conductor-db-sync-n8x6z\" (UID: \"8aa052d2-11d5-4497-86e2-23c4e1f72f98\") " 
pod="openstack/nova-cell1-conductor-db-sync-n8x6z" Feb 18 00:58:51 crc kubenswrapper[4791]: I0218 00:58:51.155623 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4s94s\" (UniqueName: \"kubernetes.io/projected/8aa052d2-11d5-4497-86e2-23c4e1f72f98-kube-api-access-4s94s\") pod \"nova-cell1-conductor-db-sync-n8x6z\" (UID: \"8aa052d2-11d5-4497-86e2-23c4e1f72f98\") " pod="openstack/nova-cell1-conductor-db-sync-n8x6z" Feb 18 00:58:51 crc kubenswrapper[4791]: I0218 00:58:51.155766 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8aa052d2-11d5-4497-86e2-23c4e1f72f98-scripts\") pod \"nova-cell1-conductor-db-sync-n8x6z\" (UID: \"8aa052d2-11d5-4497-86e2-23c4e1f72f98\") " pod="openstack/nova-cell1-conductor-db-sync-n8x6z" Feb 18 00:58:51 crc kubenswrapper[4791]: I0218 00:58:51.155806 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8aa052d2-11d5-4497-86e2-23c4e1f72f98-config-data\") pod \"nova-cell1-conductor-db-sync-n8x6z\" (UID: \"8aa052d2-11d5-4497-86e2-23c4e1f72f98\") " pod="openstack/nova-cell1-conductor-db-sync-n8x6z" Feb 18 00:58:51 crc kubenswrapper[4791]: I0218 00:58:51.155836 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8aa052d2-11d5-4497-86e2-23c4e1f72f98-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-n8x6z\" (UID: \"8aa052d2-11d5-4497-86e2-23c4e1f72f98\") " pod="openstack/nova-cell1-conductor-db-sync-n8x6z" Feb 18 00:58:51 crc kubenswrapper[4791]: I0218 00:58:51.162291 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8aa052d2-11d5-4497-86e2-23c4e1f72f98-config-data\") pod \"nova-cell1-conductor-db-sync-n8x6z\" (UID: \"8aa052d2-11d5-4497-86e2-23c4e1f72f98\") " pod="openstack/nova-cell1-conductor-db-sync-n8x6z" Feb 18 00:58:51 crc kubenswrapper[4791]: I0218 00:58:51.174447 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8aa052d2-11d5-4497-86e2-23c4e1f72f98-scripts\") pod \"nova-cell1-conductor-db-sync-n8x6z\" (UID: \"8aa052d2-11d5-4497-86e2-23c4e1f72f98\") " pod="openstack/nova-cell1-conductor-db-sync-n8x6z" Feb 18 00:58:51 crc kubenswrapper[4791]: I0218 00:58:51.184986 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8aa052d2-11d5-4497-86e2-23c4e1f72f98-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-n8x6z\" (UID: \"8aa052d2-11d5-4497-86e2-23c4e1f72f98\") " pod="openstack/nova-cell1-conductor-db-sync-n8x6z" Feb 18 00:58:51 crc kubenswrapper[4791]: I0218 00:58:51.205708 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4s94s\" (UniqueName: \"kubernetes.io/projected/8aa052d2-11d5-4497-86e2-23c4e1f72f98-kube-api-access-4s94s\") pod \"nova-cell1-conductor-db-sync-n8x6z\" (UID: \"8aa052d2-11d5-4497-86e2-23c4e1f72f98\") " pod="openstack/nova-cell1-conductor-db-sync-n8x6z" Feb 18 00:58:51 crc kubenswrapper[4791]: I0218 00:58:51.318818 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Feb 18 00:58:51 crc kubenswrapper[4791]: I0218 00:58:51.322102 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-n8x6z" Feb 18 00:58:51 crc kubenswrapper[4791]: I0218 00:58:51.625801 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Feb 18 00:58:51 crc kubenswrapper[4791]: W0218 00:58:51.634281 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod13f78453_b30b_4f2c_bec0_22c7e601d5c7.slice/crio-619d2093bf45cfedf20f3e0bdc3bc0fdd5701259fd5e98f2cf695cfc052ff0bb WatchSource:0}: Error finding container 619d2093bf45cfedf20f3e0bdc3bc0fdd5701259fd5e98f2cf695cfc052ff0bb: Status 404 returned error can't find the container with id 619d2093bf45cfedf20f3e0bdc3bc0fdd5701259fd5e98f2cf695cfc052ff0bb Feb 18 00:58:51 crc kubenswrapper[4791]: I0218 00:58:51.672275 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Feb 18 00:58:51 crc kubenswrapper[4791]: W0218 00:58:51.681322 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbace8397_f744_4d2a_bd15_e32f27ef9af7.slice/crio-bd80ae2ae1a1bb4bed3c3064ccde4f3e977a4d40fc0f22ed7c6f2fc02388d6f3 WatchSource:0}: Error finding container bd80ae2ae1a1bb4bed3c3064ccde4f3e977a4d40fc0f22ed7c6f2fc02388d6f3: Status 404 returned error can't find the container with id bd80ae2ae1a1bb4bed3c3064ccde4f3e977a4d40fc0f22ed7c6f2fc02388d6f3 Feb 18 00:58:51 crc kubenswrapper[4791]: I0218 00:58:51.687329 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-qnm85"] Feb 18 00:58:51 crc kubenswrapper[4791]: I0218 00:58:51.785405 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-qnm85" event={"ID":"78f72e20-5be7-448d-b18c-390ad193f0ea","Type":"ContainerStarted","Data":"9ea8af2c253e5e4ea04e52ef59cca000bcafb0085f56c314ba0c0b535cb6757d"} Feb 18 00:58:51 crc kubenswrapper[4791]: I0218 00:58:51.797631 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"bace8397-f744-4d2a-bd15-e32f27ef9af7","Type":"ContainerStarted","Data":"bd80ae2ae1a1bb4bed3c3064ccde4f3e977a4d40fc0f22ed7c6f2fc02388d6f3"} Feb 18 00:58:51 crc kubenswrapper[4791]: I0218 00:58:51.814959 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-qrrmp" event={"ID":"d90b881e-3f26-4065-addf-b131f132fef9","Type":"ContainerStarted","Data":"f4a8df5f98b22717c8caa569d9a6844f71b4c8379fac62b6a9d0f35b3f46e0f9"} Feb 18 00:58:51 crc kubenswrapper[4791]: I0218 00:58:51.818428 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"13f78453-b30b-4f2c-bec0-22c7e601d5c7","Type":"ContainerStarted","Data":"619d2093bf45cfedf20f3e0bdc3bc0fdd5701259fd5e98f2cf695cfc052ff0bb"} Feb 18 00:58:51 crc kubenswrapper[4791]: I0218 00:58:51.836774 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c2b1b43a-a26e-4c38-8efa-055269e9e34f","Type":"ContainerStarted","Data":"002aa758a16710b0543b05eb1d84d4937775376e3f85e82f0a5aab5c1dd31299"} Feb 18 00:58:51 crc kubenswrapper[4791]: I0218 00:58:51.861721 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-qrrmp" podStartSLOduration=2.861701634 podStartE2EDuration="2.861701634s" podCreationTimestamp="2026-02-18 00:58:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 
+0000 UTC" observedRunningTime="2026-02-18 00:58:51.846443461 +0000 UTC m=+1473.414456631" watchObservedRunningTime="2026-02-18 00:58:51.861701634 +0000 UTC m=+1473.429714804" Feb 18 00:58:51 crc kubenswrapper[4791]: I0218 00:58:51.956548 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-n8x6z"] Feb 18 00:58:52 crc kubenswrapper[4791]: I0218 00:58:52.850764 4791 generic.go:334] "Generic (PLEG): container finished" podID="78f72e20-5be7-448d-b18c-390ad193f0ea" containerID="a234656e00b70a0a32e4bfe9975e92cbd0efe68de6556f1c6cec0b8dc167b0f0" exitCode=0 Feb 18 00:58:52 crc kubenswrapper[4791]: I0218 00:58:52.853683 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-qnm85" event={"ID":"78f72e20-5be7-448d-b18c-390ad193f0ea","Type":"ContainerDied","Data":"a234656e00b70a0a32e4bfe9975e92cbd0efe68de6556f1c6cec0b8dc167b0f0"} Feb 18 00:58:52 crc kubenswrapper[4791]: I0218 00:58:52.859596 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-n8x6z" event={"ID":"8aa052d2-11d5-4497-86e2-23c4e1f72f98","Type":"ContainerStarted","Data":"722311e8e860f6d69a718e1e20ad3069f3bbf561752bd9e2e48aba6cea14564f"} Feb 18 00:58:52 crc kubenswrapper[4791]: I0218 00:58:52.859635 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-n8x6z" event={"ID":"8aa052d2-11d5-4497-86e2-23c4e1f72f98","Type":"ContainerStarted","Data":"cc00085509ed8e785401ad71b4180ca53f5b6d685c683a32df2447284e10383b"} Feb 18 00:58:52 crc kubenswrapper[4791]: I0218 00:58:52.896694 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-n8x6z" podStartSLOduration=2.896677974 podStartE2EDuration="2.896677974s" podCreationTimestamp="2026-02-18 00:58:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:58:52.894460336 +0000 UTC m=+1474.462473506" watchObservedRunningTime="2026-02-18 00:58:52.896677974 +0000 UTC m=+1474.464691144" Feb 18 00:58:53 crc kubenswrapper[4791]: I0218 00:58:53.413029 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Feb 18 00:58:53 crc kubenswrapper[4791]: I0218 00:58:53.452697 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Feb 18 00:58:55 crc kubenswrapper[4791]: I0218 00:58:55.905467 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c2b1b43a-a26e-4c38-8efa-055269e9e34f","Type":"ContainerStarted","Data":"f402802f065138b9b06119ca78f052b4c31518d7c4670651045a82631840955e"} Feb 18 00:58:55 crc kubenswrapper[4791]: I0218 00:58:55.906019 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c2b1b43a-a26e-4c38-8efa-055269e9e34f","Type":"ContainerStarted","Data":"4ecbc2ede1e03036be32813f12d2bb8c77529bf973e9c3be7510c7813eb11b83"} Feb 18 00:58:55 crc kubenswrapper[4791]: I0218 00:58:55.909105 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-qnm85" event={"ID":"78f72e20-5be7-448d-b18c-390ad193f0ea","Type":"ContainerStarted","Data":"4c488e324c9cbcda163c2984fd26aadbfbb9154229e641d94b7d61f46a40cd5d"} Feb 18 00:58:55 crc kubenswrapper[4791]: I0218 00:58:55.909620 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5fbc4d444f-qnm85" Feb 18 00:58:55 crc 
kubenswrapper[4791]: I0218 00:58:55.917004 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"bace8397-f744-4d2a-bd15-e32f27ef9af7","Type":"ContainerStarted","Data":"525d2635e2fb6dbf0eb57e36d74b048a1e5905e8080faf2048ee087de5b09015"} Feb 18 00:58:55 crc kubenswrapper[4791]: I0218 00:58:55.918504 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"b560aa26-6a1f-4695-aeaa-b1a80b0765ec","Type":"ContainerStarted","Data":"9d40c13311df164d949c3c135d4cb2d915e261aabafc59a3f3469bd6185abadb"} Feb 18 00:58:55 crc kubenswrapper[4791]: I0218 00:58:55.918572 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="b560aa26-6a1f-4695-aeaa-b1a80b0765ec" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://9d40c13311df164d949c3c135d4cb2d915e261aabafc59a3f3469bd6185abadb" gracePeriod=30 Feb 18 00:58:55 crc kubenswrapper[4791]: I0218 00:58:55.923585 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"13f78453-b30b-4f2c-bec0-22c7e601d5c7","Type":"ContainerStarted","Data":"3cb45baf8135e12ca924626a7a191c3c2463700ec1ede1092efa6f63dc0208fd"} Feb 18 00:58:55 crc kubenswrapper[4791]: I0218 00:58:55.923632 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"13f78453-b30b-4f2c-bec0-22c7e601d5c7","Type":"ContainerStarted","Data":"4281bd153240a2db11bd85e303e0a03654d5bb8c72fce9e354d1e6e9ba248139"} Feb 18 00:58:55 crc kubenswrapper[4791]: I0218 00:58:55.923784 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="13f78453-b30b-4f2c-bec0-22c7e601d5c7" containerName="nova-metadata-log" containerID="cri-o://4281bd153240a2db11bd85e303e0a03654d5bb8c72fce9e354d1e6e9ba248139" gracePeriod=30 Feb 18 00:58:55 crc kubenswrapper[4791]: I0218 00:58:55.923914 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="13f78453-b30b-4f2c-bec0-22c7e601d5c7" containerName="nova-metadata-metadata" containerID="cri-o://3cb45baf8135e12ca924626a7a191c3c2463700ec1ede1092efa6f63dc0208fd" gracePeriod=30 Feb 18 00:58:55 crc kubenswrapper[4791]: I0218 00:58:55.935444 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.189189032 podStartE2EDuration="6.93542051s" podCreationTimestamp="2026-02-18 00:58:49 +0000 UTC" firstStartedPulling="2026-02-18 00:58:51.31356957 +0000 UTC m=+1472.881582730" lastFinishedPulling="2026-02-18 00:58:55.059801038 +0000 UTC m=+1476.627814208" observedRunningTime="2026-02-18 00:58:55.921288272 +0000 UTC m=+1477.489301442" watchObservedRunningTime="2026-02-18 00:58:55.93542051 +0000 UTC m=+1477.503433680" Feb 18 00:58:55 crc kubenswrapper[4791]: I0218 00:58:55.947120 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.571504537 podStartE2EDuration="6.947099631s" podCreationTimestamp="2026-02-18 00:58:49 +0000 UTC" firstStartedPulling="2026-02-18 00:58:51.685316698 +0000 UTC m=+1473.253329868" lastFinishedPulling="2026-02-18 00:58:55.060911792 +0000 UTC m=+1476.628924962" observedRunningTime="2026-02-18 00:58:55.943210891 +0000 UTC m=+1477.511224081" watchObservedRunningTime="2026-02-18 00:58:55.947099631 +0000 UTC m=+1477.515112801" Feb 18 00:58:55 crc kubenswrapper[4791]: I0218 
00:58:55.969030 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.467191541 podStartE2EDuration="6.969006508s" podCreationTimestamp="2026-02-18 00:58:49 +0000 UTC" firstStartedPulling="2026-02-18 00:58:50.567151384 +0000 UTC m=+1472.135164554" lastFinishedPulling="2026-02-18 00:58:55.068966351 +0000 UTC m=+1476.636979521" observedRunningTime="2026-02-18 00:58:55.957875954 +0000 UTC m=+1477.525889124" watchObservedRunningTime="2026-02-18 00:58:55.969006508 +0000 UTC m=+1477.537019678" Feb 18 00:58:55 crc kubenswrapper[4791]: I0218 00:58:55.989016 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5fbc4d444f-qnm85" podStartSLOduration=6.988971976 podStartE2EDuration="6.988971976s" podCreationTimestamp="2026-02-18 00:58:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:58:55.978609386 +0000 UTC m=+1477.546622556" watchObservedRunningTime="2026-02-18 00:58:55.988971976 +0000 UTC m=+1477.556985146" Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.007834 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.612934549 podStartE2EDuration="7.007810239s" podCreationTimestamp="2026-02-18 00:58:49 +0000 UTC" firstStartedPulling="2026-02-18 00:58:51.647597011 +0000 UTC m=+1473.215610181" lastFinishedPulling="2026-02-18 00:58:55.042472701 +0000 UTC m=+1476.610485871" observedRunningTime="2026-02-18 00:58:56.001954668 +0000 UTC m=+1477.569967838" watchObservedRunningTime="2026-02-18 00:58:56.007810239 +0000 UTC m=+1477.575823409" Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.559030 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.602963 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4271166c-c69d-4f8c-bbe3-4c9702dd002b-config-data\") pod \"4271166c-c69d-4f8c-bbe3-4c9702dd002b\" (UID: \"4271166c-c69d-4f8c-bbe3-4c9702dd002b\") " Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.707960 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4271166c-c69d-4f8c-bbe3-4c9702dd002b-scripts\") pod \"4271166c-c69d-4f8c-bbe3-4c9702dd002b\" (UID: \"4271166c-c69d-4f8c-bbe3-4c9702dd002b\") " Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.708019 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8wr2p\" (UniqueName: \"kubernetes.io/projected/4271166c-c69d-4f8c-bbe3-4c9702dd002b-kube-api-access-8wr2p\") pod \"4271166c-c69d-4f8c-bbe3-4c9702dd002b\" (UID: \"4271166c-c69d-4f8c-bbe3-4c9702dd002b\") " Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.708175 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4271166c-c69d-4f8c-bbe3-4c9702dd002b-combined-ca-bundle\") pod \"4271166c-c69d-4f8c-bbe3-4c9702dd002b\" (UID: \"4271166c-c69d-4f8c-bbe3-4c9702dd002b\") " Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.708219 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4271166c-c69d-4f8c-bbe3-4c9702dd002b-log-httpd\") pod \"4271166c-c69d-4f8c-bbe3-4c9702dd002b\" (UID: \"4271166c-c69d-4f8c-bbe3-4c9702dd002b\") " Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.708259 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4271166c-c69d-4f8c-bbe3-4c9702dd002b-run-httpd\") pod \"4271166c-c69d-4f8c-bbe3-4c9702dd002b\" (UID: \"4271166c-c69d-4f8c-bbe3-4c9702dd002b\") " Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.708524 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4271166c-c69d-4f8c-bbe3-4c9702dd002b-sg-core-conf-yaml\") pod \"4271166c-c69d-4f8c-bbe3-4c9702dd002b\" (UID: \"4271166c-c69d-4f8c-bbe3-4c9702dd002b\") " Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.710885 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4271166c-c69d-4f8c-bbe3-4c9702dd002b-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "4271166c-c69d-4f8c-bbe3-4c9702dd002b" (UID: "4271166c-c69d-4f8c-bbe3-4c9702dd002b"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.710915 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4271166c-c69d-4f8c-bbe3-4c9702dd002b-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "4271166c-c69d-4f8c-bbe3-4c9702dd002b" (UID: "4271166c-c69d-4f8c-bbe3-4c9702dd002b"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.714198 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4271166c-c69d-4f8c-bbe3-4c9702dd002b-kube-api-access-8wr2p" (OuterVolumeSpecName: "kube-api-access-8wr2p") pod "4271166c-c69d-4f8c-bbe3-4c9702dd002b" (UID: "4271166c-c69d-4f8c-bbe3-4c9702dd002b"). InnerVolumeSpecName "kube-api-access-8wr2p". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.714918 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4271166c-c69d-4f8c-bbe3-4c9702dd002b-scripts" (OuterVolumeSpecName: "scripts") pod "4271166c-c69d-4f8c-bbe3-4c9702dd002b" (UID: "4271166c-c69d-4f8c-bbe3-4c9702dd002b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.742514 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4271166c-c69d-4f8c-bbe3-4c9702dd002b-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "4271166c-c69d-4f8c-bbe3-4c9702dd002b" (UID: "4271166c-c69d-4f8c-bbe3-4c9702dd002b"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.747942 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4271166c-c69d-4f8c-bbe3-4c9702dd002b-config-data" (OuterVolumeSpecName: "config-data") pod "4271166c-c69d-4f8c-bbe3-4c9702dd002b" (UID: "4271166c-c69d-4f8c-bbe3-4c9702dd002b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.799892 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.799926 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4271166c-c69d-4f8c-bbe3-4c9702dd002b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4271166c-c69d-4f8c-bbe3-4c9702dd002b" (UID: "4271166c-c69d-4f8c-bbe3-4c9702dd002b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.799956 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.800062 4791 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.801052 4791 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9c99f3d26f7a57737b6a7ef3e614fe415cd8c65f3089255fc52b1cd99b1db166"} pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.801119 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" containerID="cri-o://9c99f3d26f7a57737b6a7ef3e614fe415cd8c65f3089255fc52b1cd99b1db166" gracePeriod=600 Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.812449 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4271166c-c69d-4f8c-bbe3-4c9702dd002b-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.812485 4791 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4271166c-c69d-4f8c-bbe3-4c9702dd002b-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.812503 4791 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4271166c-c69d-4f8c-bbe3-4c9702dd002b-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.812515 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8wr2p\" (UniqueName: \"kubernetes.io/projected/4271166c-c69d-4f8c-bbe3-4c9702dd002b-kube-api-access-8wr2p\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.812527 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4271166c-c69d-4f8c-bbe3-4c9702dd002b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.812538 4791 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4271166c-c69d-4f8c-bbe3-4c9702dd002b-log-httpd\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.812549 4791 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4271166c-c69d-4f8c-bbe3-4c9702dd002b-run-httpd\") on node \"crc\" DevicePath \"\"" Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.939285 4791 generic.go:334] "Generic (PLEG): container finished" podID="4271166c-c69d-4f8c-bbe3-4c9702dd002b" 
containerID="616cc2f71dded61314493713060dcc420d4fcafaba5707367e9516601c5f0936" exitCode=0 Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.939385 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.939386 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4271166c-c69d-4f8c-bbe3-4c9702dd002b","Type":"ContainerDied","Data":"616cc2f71dded61314493713060dcc420d4fcafaba5707367e9516601c5f0936"} Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.939503 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4271166c-c69d-4f8c-bbe3-4c9702dd002b","Type":"ContainerDied","Data":"a167d1bbcd10a5bba38a3b02cb66c2c3b43ef7b17a2c33b0cf80aed467fc1860"} Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.939528 4791 scope.go:117] "RemoveContainer" containerID="b171524cf7f4eda12b966b39bd2026fd989e8202eec0bd7523a2e53f9f55a037" Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.947090 4791 generic.go:334] "Generic (PLEG): container finished" podID="0b31a333-8f95-459c-8135-e91e557c4c85" containerID="9c99f3d26f7a57737b6a7ef3e614fe415cd8c65f3089255fc52b1cd99b1db166" exitCode=0 Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.947178 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerDied","Data":"9c99f3d26f7a57737b6a7ef3e614fe415cd8c65f3089255fc52b1cd99b1db166"} Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.948754 4791 generic.go:334] "Generic (PLEG): container finished" podID="13f78453-b30b-4f2c-bec0-22c7e601d5c7" containerID="4281bd153240a2db11bd85e303e0a03654d5bb8c72fce9e354d1e6e9ba248139" exitCode=143 Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.948877 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"13f78453-b30b-4f2c-bec0-22c7e601d5c7","Type":"ContainerDied","Data":"4281bd153240a2db11bd85e303e0a03654d5bb8c72fce9e354d1e6e9ba248139"} Feb 18 00:58:56 crc kubenswrapper[4791]: I0218 00:58:56.980785 4791 scope.go:117] "RemoveContainer" containerID="dbf740840c52a3aa8e2b641755d45deef0e95aceea4f1ed9993977053ec1e310" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.011675 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.014523 4791 scope.go:117] "RemoveContainer" containerID="2ed89c50025808c68f9c8d4f15f3c4882799a85e1ce3c319c88e04bda2117671" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.035385 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.045708 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:58:57 crc kubenswrapper[4791]: E0218 00:58:57.046307 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4271166c-c69d-4f8c-bbe3-4c9702dd002b" containerName="sg-core" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.046322 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="4271166c-c69d-4f8c-bbe3-4c9702dd002b" containerName="sg-core" Feb 18 00:58:57 crc kubenswrapper[4791]: E0218 00:58:57.046332 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4271166c-c69d-4f8c-bbe3-4c9702dd002b" 
containerName="proxy-httpd" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.046340 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="4271166c-c69d-4f8c-bbe3-4c9702dd002b" containerName="proxy-httpd" Feb 18 00:58:57 crc kubenswrapper[4791]: E0218 00:58:57.046392 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4271166c-c69d-4f8c-bbe3-4c9702dd002b" containerName="ceilometer-notification-agent" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.046399 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="4271166c-c69d-4f8c-bbe3-4c9702dd002b" containerName="ceilometer-notification-agent" Feb 18 00:58:57 crc kubenswrapper[4791]: E0218 00:58:57.046406 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4271166c-c69d-4f8c-bbe3-4c9702dd002b" containerName="ceilometer-central-agent" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.046412 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="4271166c-c69d-4f8c-bbe3-4c9702dd002b" containerName="ceilometer-central-agent" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.046608 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="4271166c-c69d-4f8c-bbe3-4c9702dd002b" containerName="ceilometer-central-agent" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.046637 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="4271166c-c69d-4f8c-bbe3-4c9702dd002b" containerName="sg-core" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.046651 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="4271166c-c69d-4f8c-bbe3-4c9702dd002b" containerName="ceilometer-notification-agent" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.046666 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="4271166c-c69d-4f8c-bbe3-4c9702dd002b" containerName="proxy-httpd" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.048744 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.056347 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.058320 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.060550 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.118878 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1b6491f1-bdcf-4dee-8431-f40a42f31974-log-httpd\") pod \"ceilometer-0\" (UID: \"1b6491f1-bdcf-4dee-8431-f40a42f31974\") " pod="openstack/ceilometer-0" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.118936 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b6491f1-bdcf-4dee-8431-f40a42f31974-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1b6491f1-bdcf-4dee-8431-f40a42f31974\") " pod="openstack/ceilometer-0" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.119022 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1b6491f1-bdcf-4dee-8431-f40a42f31974-run-httpd\") pod \"ceilometer-0\" (UID: \"1b6491f1-bdcf-4dee-8431-f40a42f31974\") " pod="openstack/ceilometer-0" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.119101 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6wmcr\" (UniqueName: \"kubernetes.io/projected/1b6491f1-bdcf-4dee-8431-f40a42f31974-kube-api-access-6wmcr\") pod \"ceilometer-0\" (UID: \"1b6491f1-bdcf-4dee-8431-f40a42f31974\") " pod="openstack/ceilometer-0" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.119148 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1b6491f1-bdcf-4dee-8431-f40a42f31974-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1b6491f1-bdcf-4dee-8431-f40a42f31974\") " pod="openstack/ceilometer-0" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.119820 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1b6491f1-bdcf-4dee-8431-f40a42f31974-scripts\") pod \"ceilometer-0\" (UID: \"1b6491f1-bdcf-4dee-8431-f40a42f31974\") " pod="openstack/ceilometer-0" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.119915 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b6491f1-bdcf-4dee-8431-f40a42f31974-config-data\") pod \"ceilometer-0\" (UID: \"1b6491f1-bdcf-4dee-8431-f40a42f31974\") " pod="openstack/ceilometer-0" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.127659 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4271166c-c69d-4f8c-bbe3-4c9702dd002b" path="/var/lib/kubelet/pods/4271166c-c69d-4f8c-bbe3-4c9702dd002b/volumes" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.145354 4791 scope.go:117] "RemoveContainer" 
containerID="616cc2f71dded61314493713060dcc420d4fcafaba5707367e9516601c5f0936" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.207260 4791 scope.go:117] "RemoveContainer" containerID="b171524cf7f4eda12b966b39bd2026fd989e8202eec0bd7523a2e53f9f55a037" Feb 18 00:58:57 crc kubenswrapper[4791]: E0218 00:58:57.207661 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b171524cf7f4eda12b966b39bd2026fd989e8202eec0bd7523a2e53f9f55a037\": container with ID starting with b171524cf7f4eda12b966b39bd2026fd989e8202eec0bd7523a2e53f9f55a037 not found: ID does not exist" containerID="b171524cf7f4eda12b966b39bd2026fd989e8202eec0bd7523a2e53f9f55a037" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.207695 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b171524cf7f4eda12b966b39bd2026fd989e8202eec0bd7523a2e53f9f55a037"} err="failed to get container status \"b171524cf7f4eda12b966b39bd2026fd989e8202eec0bd7523a2e53f9f55a037\": rpc error: code = NotFound desc = could not find container \"b171524cf7f4eda12b966b39bd2026fd989e8202eec0bd7523a2e53f9f55a037\": container with ID starting with b171524cf7f4eda12b966b39bd2026fd989e8202eec0bd7523a2e53f9f55a037 not found: ID does not exist" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.207717 4791 scope.go:117] "RemoveContainer" containerID="dbf740840c52a3aa8e2b641755d45deef0e95aceea4f1ed9993977053ec1e310" Feb 18 00:58:57 crc kubenswrapper[4791]: E0218 00:58:57.207940 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dbf740840c52a3aa8e2b641755d45deef0e95aceea4f1ed9993977053ec1e310\": container with ID starting with dbf740840c52a3aa8e2b641755d45deef0e95aceea4f1ed9993977053ec1e310 not found: ID does not exist" containerID="dbf740840c52a3aa8e2b641755d45deef0e95aceea4f1ed9993977053ec1e310" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.207990 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dbf740840c52a3aa8e2b641755d45deef0e95aceea4f1ed9993977053ec1e310"} err="failed to get container status \"dbf740840c52a3aa8e2b641755d45deef0e95aceea4f1ed9993977053ec1e310\": rpc error: code = NotFound desc = could not find container \"dbf740840c52a3aa8e2b641755d45deef0e95aceea4f1ed9993977053ec1e310\": container with ID starting with dbf740840c52a3aa8e2b641755d45deef0e95aceea4f1ed9993977053ec1e310 not found: ID does not exist" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.208011 4791 scope.go:117] "RemoveContainer" containerID="2ed89c50025808c68f9c8d4f15f3c4882799a85e1ce3c319c88e04bda2117671" Feb 18 00:58:57 crc kubenswrapper[4791]: E0218 00:58:57.208266 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2ed89c50025808c68f9c8d4f15f3c4882799a85e1ce3c319c88e04bda2117671\": container with ID starting with 2ed89c50025808c68f9c8d4f15f3c4882799a85e1ce3c319c88e04bda2117671 not found: ID does not exist" containerID="2ed89c50025808c68f9c8d4f15f3c4882799a85e1ce3c319c88e04bda2117671" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.208310 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ed89c50025808c68f9c8d4f15f3c4882799a85e1ce3c319c88e04bda2117671"} err="failed to get container status \"2ed89c50025808c68f9c8d4f15f3c4882799a85e1ce3c319c88e04bda2117671\": rpc error: code = 
NotFound desc = could not find container \"2ed89c50025808c68f9c8d4f15f3c4882799a85e1ce3c319c88e04bda2117671\": container with ID starting with 2ed89c50025808c68f9c8d4f15f3c4882799a85e1ce3c319c88e04bda2117671 not found: ID does not exist" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.208329 4791 scope.go:117] "RemoveContainer" containerID="616cc2f71dded61314493713060dcc420d4fcafaba5707367e9516601c5f0936" Feb 18 00:58:57 crc kubenswrapper[4791]: E0218 00:58:57.208559 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"616cc2f71dded61314493713060dcc420d4fcafaba5707367e9516601c5f0936\": container with ID starting with 616cc2f71dded61314493713060dcc420d4fcafaba5707367e9516601c5f0936 not found: ID does not exist" containerID="616cc2f71dded61314493713060dcc420d4fcafaba5707367e9516601c5f0936" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.208590 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"616cc2f71dded61314493713060dcc420d4fcafaba5707367e9516601c5f0936"} err="failed to get container status \"616cc2f71dded61314493713060dcc420d4fcafaba5707367e9516601c5f0936\": rpc error: code = NotFound desc = could not find container \"616cc2f71dded61314493713060dcc420d4fcafaba5707367e9516601c5f0936\": container with ID starting with 616cc2f71dded61314493713060dcc420d4fcafaba5707367e9516601c5f0936 not found: ID does not exist" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.208607 4791 scope.go:117] "RemoveContainer" containerID="c9007556fe5d18b5a46e46ece707f7a5089104141aff41585216532707c5ee80" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.224249 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6wmcr\" (UniqueName: \"kubernetes.io/projected/1b6491f1-bdcf-4dee-8431-f40a42f31974-kube-api-access-6wmcr\") pod \"ceilometer-0\" (UID: \"1b6491f1-bdcf-4dee-8431-f40a42f31974\") " pod="openstack/ceilometer-0" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.224353 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1b6491f1-bdcf-4dee-8431-f40a42f31974-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1b6491f1-bdcf-4dee-8431-f40a42f31974\") " pod="openstack/ceilometer-0" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.224444 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1b6491f1-bdcf-4dee-8431-f40a42f31974-scripts\") pod \"ceilometer-0\" (UID: \"1b6491f1-bdcf-4dee-8431-f40a42f31974\") " pod="openstack/ceilometer-0" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.224534 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b6491f1-bdcf-4dee-8431-f40a42f31974-config-data\") pod \"ceilometer-0\" (UID: \"1b6491f1-bdcf-4dee-8431-f40a42f31974\") " pod="openstack/ceilometer-0" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.224832 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1b6491f1-bdcf-4dee-8431-f40a42f31974-log-httpd\") pod \"ceilometer-0\" (UID: \"1b6491f1-bdcf-4dee-8431-f40a42f31974\") " pod="openstack/ceilometer-0" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.226750 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1b6491f1-bdcf-4dee-8431-f40a42f31974-log-httpd\") pod \"ceilometer-0\" (UID: \"1b6491f1-bdcf-4dee-8431-f40a42f31974\") " pod="openstack/ceilometer-0" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.227363 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b6491f1-bdcf-4dee-8431-f40a42f31974-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1b6491f1-bdcf-4dee-8431-f40a42f31974\") " pod="openstack/ceilometer-0" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.227529 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1b6491f1-bdcf-4dee-8431-f40a42f31974-run-httpd\") pod \"ceilometer-0\" (UID: \"1b6491f1-bdcf-4dee-8431-f40a42f31974\") " pod="openstack/ceilometer-0" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.227928 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1b6491f1-bdcf-4dee-8431-f40a42f31974-run-httpd\") pod \"ceilometer-0\" (UID: \"1b6491f1-bdcf-4dee-8431-f40a42f31974\") " pod="openstack/ceilometer-0" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.244451 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b6491f1-bdcf-4dee-8431-f40a42f31974-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1b6491f1-bdcf-4dee-8431-f40a42f31974\") " pod="openstack/ceilometer-0" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.245668 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1b6491f1-bdcf-4dee-8431-f40a42f31974-scripts\") pod \"ceilometer-0\" (UID: \"1b6491f1-bdcf-4dee-8431-f40a42f31974\") " pod="openstack/ceilometer-0" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.246114 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1b6491f1-bdcf-4dee-8431-f40a42f31974-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1b6491f1-bdcf-4dee-8431-f40a42f31974\") " pod="openstack/ceilometer-0" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.247206 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b6491f1-bdcf-4dee-8431-f40a42f31974-config-data\") pod \"ceilometer-0\" (UID: \"1b6491f1-bdcf-4dee-8431-f40a42f31974\") " pod="openstack/ceilometer-0" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.248727 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6wmcr\" (UniqueName: \"kubernetes.io/projected/1b6491f1-bdcf-4dee-8431-f40a42f31974-kube-api-access-6wmcr\") pod \"ceilometer-0\" (UID: \"1b6491f1-bdcf-4dee-8431-f40a42f31974\") " pod="openstack/ceilometer-0" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.507619 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 18 00:58:57 crc kubenswrapper[4791]: I0218 00:58:57.972917 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerStarted","Data":"96581330093c622b698c7dcf5f56174d5a68f6da90cd5ff685869e2dfabfd24a"} Feb 18 00:58:58 crc kubenswrapper[4791]: I0218 00:58:58.033447 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:58:58 crc kubenswrapper[4791]: W0218 00:58:58.036970 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1b6491f1_bdcf_4dee_8431_f40a42f31974.slice/crio-a51bb8a25a1dba1e6234536f996ccefa6b404885c68c86e9da2fc40b026a6320 WatchSource:0}: Error finding container a51bb8a25a1dba1e6234536f996ccefa6b404885c68c86e9da2fc40b026a6320: Status 404 returned error can't find the container with id a51bb8a25a1dba1e6234536f996ccefa6b404885c68c86e9da2fc40b026a6320 Feb 18 00:58:58 crc kubenswrapper[4791]: I0218 00:58:58.984739 4791 generic.go:334] "Generic (PLEG): container finished" podID="d90b881e-3f26-4065-addf-b131f132fef9" containerID="f4a8df5f98b22717c8caa569d9a6844f71b4c8379fac62b6a9d0f35b3f46e0f9" exitCode=0 Feb 18 00:58:58 crc kubenswrapper[4791]: I0218 00:58:58.984771 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-qrrmp" event={"ID":"d90b881e-3f26-4065-addf-b131f132fef9","Type":"ContainerDied","Data":"f4a8df5f98b22717c8caa569d9a6844f71b4c8379fac62b6a9d0f35b3f46e0f9"} Feb 18 00:58:58 crc kubenswrapper[4791]: I0218 00:58:58.986804 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1b6491f1-bdcf-4dee-8431-f40a42f31974","Type":"ContainerStarted","Data":"4c7612fd6f6a0fa38bd600643f3c0070cfd7c993fd1e225c0e406af5cf936b53"} Feb 18 00:58:58 crc kubenswrapper[4791]: I0218 00:58:58.986852 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1b6491f1-bdcf-4dee-8431-f40a42f31974","Type":"ContainerStarted","Data":"a51bb8a25a1dba1e6234536f996ccefa6b404885c68c86e9da2fc40b026a6320"} Feb 18 00:58:59 crc kubenswrapper[4791]: I0218 00:58:59.894757 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.000540 4791 generic.go:334] "Generic (PLEG): container finished" podID="8aa052d2-11d5-4497-86e2-23c4e1f72f98" containerID="722311e8e860f6d69a718e1e20ad3069f3bbf561752bd9e2e48aba6cea14564f" exitCode=0 Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.000854 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-n8x6z" event={"ID":"8aa052d2-11d5-4497-86e2-23c4e1f72f98","Type":"ContainerDied","Data":"722311e8e860f6d69a718e1e20ad3069f3bbf561752bd9e2e48aba6cea14564f"} Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.004368 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1b6491f1-bdcf-4dee-8431-f40a42f31974","Type":"ContainerStarted","Data":"42a78a94641d73380c44ac477a87bcde1b640e51e613cbecb083c0e82635a7c5"} Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.099044 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.099111 4791 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.108188 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.108459 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.167225 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-create-tw6qw"] Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.168679 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-tw6qw" Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.183261 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-tw6qw"] Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.261835 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0d1c-account-create-update-wj6st"] Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.263879 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0d1c-account-create-update-wj6st" Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.280811 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0d1c-account-create-update-wj6st"] Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.282750 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-db-secret" Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.309492 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-trqw4\" (UniqueName: \"kubernetes.io/projected/6fb61a28-f5c5-4c33-8706-4eb4a7160424-kube-api-access-trqw4\") pod \"aodh-db-create-tw6qw\" (UID: \"6fb61a28-f5c5-4c33-8706-4eb4a7160424\") " pod="openstack/aodh-db-create-tw6qw" Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.309734 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6fb61a28-f5c5-4c33-8706-4eb4a7160424-operator-scripts\") pod \"aodh-db-create-tw6qw\" (UID: \"6fb61a28-f5c5-4c33-8706-4eb4a7160424\") " pod="openstack/aodh-db-create-tw6qw" Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.411628 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc0535df-5769-4e87-bfbd-5bec07e3dda3-operator-scripts\") pod \"aodh-0d1c-account-create-update-wj6st\" (UID: \"cc0535df-5769-4e87-bfbd-5bec07e3dda3\") " pod="openstack/aodh-0d1c-account-create-update-wj6st" Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.411740 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6fb61a28-f5c5-4c33-8706-4eb4a7160424-operator-scripts\") pod \"aodh-db-create-tw6qw\" (UID: \"6fb61a28-f5c5-4c33-8706-4eb4a7160424\") " pod="openstack/aodh-db-create-tw6qw" Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.411799 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9kj9k\" (UniqueName: \"kubernetes.io/projected/cc0535df-5769-4e87-bfbd-5bec07e3dda3-kube-api-access-9kj9k\") pod \"aodh-0d1c-account-create-update-wj6st\" (UID: 
\"cc0535df-5769-4e87-bfbd-5bec07e3dda3\") " pod="openstack/aodh-0d1c-account-create-update-wj6st" Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.411870 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-trqw4\" (UniqueName: \"kubernetes.io/projected/6fb61a28-f5c5-4c33-8706-4eb4a7160424-kube-api-access-trqw4\") pod \"aodh-db-create-tw6qw\" (UID: \"6fb61a28-f5c5-4c33-8706-4eb4a7160424\") " pod="openstack/aodh-db-create-tw6qw" Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.413505 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6fb61a28-f5c5-4c33-8706-4eb4a7160424-operator-scripts\") pod \"aodh-db-create-tw6qw\" (UID: \"6fb61a28-f5c5-4c33-8706-4eb4a7160424\") " pod="openstack/aodh-db-create-tw6qw" Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.430707 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.430780 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.443367 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5fbc4d444f-qnm85" Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.444237 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-trqw4\" (UniqueName: \"kubernetes.io/projected/6fb61a28-f5c5-4c33-8706-4eb4a7160424-kube-api-access-trqw4\") pod \"aodh-db-create-tw6qw\" (UID: \"6fb61a28-f5c5-4c33-8706-4eb4a7160424\") " pod="openstack/aodh-db-create-tw6qw" Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.490524 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.508713 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-create-tw6qw" Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.513496 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9kj9k\" (UniqueName: \"kubernetes.io/projected/cc0535df-5769-4e87-bfbd-5bec07e3dda3-kube-api-access-9kj9k\") pod \"aodh-0d1c-account-create-update-wj6st\" (UID: \"cc0535df-5769-4e87-bfbd-5bec07e3dda3\") " pod="openstack/aodh-0d1c-account-create-update-wj6st" Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.513670 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc0535df-5769-4e87-bfbd-5bec07e3dda3-operator-scripts\") pod \"aodh-0d1c-account-create-update-wj6st\" (UID: \"cc0535df-5769-4e87-bfbd-5bec07e3dda3\") " pod="openstack/aodh-0d1c-account-create-update-wj6st" Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.514746 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc0535df-5769-4e87-bfbd-5bec07e3dda3-operator-scripts\") pod \"aodh-0d1c-account-create-update-wj6st\" (UID: \"cc0535df-5769-4e87-bfbd-5bec07e3dda3\") " pod="openstack/aodh-0d1c-account-create-update-wj6st" Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.542801 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9kj9k\" (UniqueName: \"kubernetes.io/projected/cc0535df-5769-4e87-bfbd-5bec07e3dda3-kube-api-access-9kj9k\") pod \"aodh-0d1c-account-create-update-wj6st\" (UID: \"cc0535df-5769-4e87-bfbd-5bec07e3dda3\") " pod="openstack/aodh-0d1c-account-create-update-wj6st" Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.545521 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-9skqg"] Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.545779 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-f6bc4c6c9-9skqg" podUID="26012547-d6fc-44de-9ad8-413d40acfb88" containerName="dnsmasq-dns" containerID="cri-o://b2d575b2f092ad1eebb2ee48db390d5784d8f14a4ffdc8c3defc64bb910245e7" gracePeriod=10 Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.608268 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0d1c-account-create-update-wj6st" Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.786208 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-qrrmp" Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.926635 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d90b881e-3f26-4065-addf-b131f132fef9-scripts\") pod \"d90b881e-3f26-4065-addf-b131f132fef9\" (UID: \"d90b881e-3f26-4065-addf-b131f132fef9\") " Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.926792 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-77qk9\" (UniqueName: \"kubernetes.io/projected/d90b881e-3f26-4065-addf-b131f132fef9-kube-api-access-77qk9\") pod \"d90b881e-3f26-4065-addf-b131f132fef9\" (UID: \"d90b881e-3f26-4065-addf-b131f132fef9\") " Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.926856 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d90b881e-3f26-4065-addf-b131f132fef9-combined-ca-bundle\") pod \"d90b881e-3f26-4065-addf-b131f132fef9\" (UID: \"d90b881e-3f26-4065-addf-b131f132fef9\") " Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.926883 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d90b881e-3f26-4065-addf-b131f132fef9-config-data\") pod \"d90b881e-3f26-4065-addf-b131f132fef9\" (UID: \"d90b881e-3f26-4065-addf-b131f132fef9\") " Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.932598 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d90b881e-3f26-4065-addf-b131f132fef9-scripts" (OuterVolumeSpecName: "scripts") pod "d90b881e-3f26-4065-addf-b131f132fef9" (UID: "d90b881e-3f26-4065-addf-b131f132fef9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.947660 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d90b881e-3f26-4065-addf-b131f132fef9-kube-api-access-77qk9" (OuterVolumeSpecName: "kube-api-access-77qk9") pod "d90b881e-3f26-4065-addf-b131f132fef9" (UID: "d90b881e-3f26-4065-addf-b131f132fef9"). InnerVolumeSpecName "kube-api-access-77qk9". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:59:00 crc kubenswrapper[4791]: I0218 00:59:00.983358 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d90b881e-3f26-4065-addf-b131f132fef9-config-data" (OuterVolumeSpecName: "config-data") pod "d90b881e-3f26-4065-addf-b131f132fef9" (UID: "d90b881e-3f26-4065-addf-b131f132fef9"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.029243 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-77qk9\" (UniqueName: \"kubernetes.io/projected/d90b881e-3f26-4065-addf-b131f132fef9-kube-api-access-77qk9\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.029276 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d90b881e-3f26-4065-addf-b131f132fef9-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.029285 4791 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d90b881e-3f26-4065-addf-b131f132fef9-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.081311 4791 generic.go:334] "Generic (PLEG): container finished" podID="26012547-d6fc-44de-9ad8-413d40acfb88" containerID="b2d575b2f092ad1eebb2ee48db390d5784d8f14a4ffdc8c3defc64bb910245e7" exitCode=0 Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.122473 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-qrrmp" Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.154476 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-9skqg" event={"ID":"26012547-d6fc-44de-9ad8-413d40acfb88","Type":"ContainerDied","Data":"b2d575b2f092ad1eebb2ee48db390d5784d8f14a4ffdc8c3defc64bb910245e7"} Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.154511 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-qrrmp" event={"ID":"d90b881e-3f26-4065-addf-b131f132fef9","Type":"ContainerDied","Data":"8b7b5c6a03e2de3b1bd807690e8a7bab8279b28268f57e23ff2fb43ba588ae9f"} Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.154527 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8b7b5c6a03e2de3b1bd807690e8a7bab8279b28268f57e23ff2fb43ba588ae9f" Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.163372 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1b6491f1-bdcf-4dee-8431-f40a42f31974","Type":"ContainerStarted","Data":"df765d183742581ea1577d91862b93b2592ae80aae1783f3f80e35a4fb81228c"} Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.184400 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="c2b1b43a-a26e-4c38-8efa-055269e9e34f" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.245:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.184479 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="c2b1b43a-a26e-4c38-8efa-055269e9e34f" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.245:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.185038 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d90b881e-3f26-4065-addf-b131f132fef9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d90b881e-3f26-4065-addf-b131f132fef9" (UID: "d90b881e-3f26-4065-addf-b131f132fef9"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.233192 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d90b881e-3f26-4065-addf-b131f132fef9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.250864 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.251072 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="c2b1b43a-a26e-4c38-8efa-055269e9e34f" containerName="nova-api-log" containerID="cri-o://4ecbc2ede1e03036be32813f12d2bb8c77529bf973e9c3be7510c7813eb11b83" gracePeriod=30 Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.252043 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="c2b1b43a-a26e-4c38-8efa-055269e9e34f" containerName="nova-api-api" containerID="cri-o://f402802f065138b9b06119ca78f052b4c31518d7c4670651045a82631840955e" gracePeriod=30 Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.296692 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.300824 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.388711 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-tw6qw"] Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.415058 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f6bc4c6c9-9skqg" Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.461420 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0d1c-account-create-update-wj6st"] Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.566461 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/26012547-d6fc-44de-9ad8-413d40acfb88-config\") pod \"26012547-d6fc-44de-9ad8-413d40acfb88\" (UID: \"26012547-d6fc-44de-9ad8-413d40acfb88\") " Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.566598 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/26012547-d6fc-44de-9ad8-413d40acfb88-ovsdbserver-nb\") pod \"26012547-d6fc-44de-9ad8-413d40acfb88\" (UID: \"26012547-d6fc-44de-9ad8-413d40acfb88\") " Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.566657 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/26012547-d6fc-44de-9ad8-413d40acfb88-ovsdbserver-sb\") pod \"26012547-d6fc-44de-9ad8-413d40acfb88\" (UID: \"26012547-d6fc-44de-9ad8-413d40acfb88\") " Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.566812 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/26012547-d6fc-44de-9ad8-413d40acfb88-dns-svc\") pod \"26012547-d6fc-44de-9ad8-413d40acfb88\" (UID: \"26012547-d6fc-44de-9ad8-413d40acfb88\") " Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.566840 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mt899\" 
(UniqueName: \"kubernetes.io/projected/26012547-d6fc-44de-9ad8-413d40acfb88-kube-api-access-mt899\") pod \"26012547-d6fc-44de-9ad8-413d40acfb88\" (UID: \"26012547-d6fc-44de-9ad8-413d40acfb88\") " Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.566938 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/26012547-d6fc-44de-9ad8-413d40acfb88-dns-swift-storage-0\") pod \"26012547-d6fc-44de-9ad8-413d40acfb88\" (UID: \"26012547-d6fc-44de-9ad8-413d40acfb88\") " Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.589650 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/26012547-d6fc-44de-9ad8-413d40acfb88-kube-api-access-mt899" (OuterVolumeSpecName: "kube-api-access-mt899") pod "26012547-d6fc-44de-9ad8-413d40acfb88" (UID: "26012547-d6fc-44de-9ad8-413d40acfb88"). InnerVolumeSpecName "kube-api-access-mt899". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.640192 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/26012547-d6fc-44de-9ad8-413d40acfb88-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "26012547-d6fc-44de-9ad8-413d40acfb88" (UID: "26012547-d6fc-44de-9ad8-413d40acfb88"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.669780 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mt899\" (UniqueName: \"kubernetes.io/projected/26012547-d6fc-44de-9ad8-413d40acfb88-kube-api-access-mt899\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.669810 4791 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/26012547-d6fc-44de-9ad8-413d40acfb88-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.798420 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/26012547-d6fc-44de-9ad8-413d40acfb88-config" (OuterVolumeSpecName: "config") pod "26012547-d6fc-44de-9ad8-413d40acfb88" (UID: "26012547-d6fc-44de-9ad8-413d40acfb88"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.809270 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/26012547-d6fc-44de-9ad8-413d40acfb88-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "26012547-d6fc-44de-9ad8-413d40acfb88" (UID: "26012547-d6fc-44de-9ad8-413d40acfb88"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.845605 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/26012547-d6fc-44de-9ad8-413d40acfb88-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "26012547-d6fc-44de-9ad8-413d40acfb88" (UID: "26012547-d6fc-44de-9ad8-413d40acfb88"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.873952 4791 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/26012547-d6fc-44de-9ad8-413d40acfb88-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.873974 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/26012547-d6fc-44de-9ad8-413d40acfb88-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.873984 4791 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/26012547-d6fc-44de-9ad8-413d40acfb88-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.876566 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/26012547-d6fc-44de-9ad8-413d40acfb88-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "26012547-d6fc-44de-9ad8-413d40acfb88" (UID: "26012547-d6fc-44de-9ad8-413d40acfb88"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.894654 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-n8x6z" Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.975494 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8aa052d2-11d5-4497-86e2-23c4e1f72f98-config-data\") pod \"8aa052d2-11d5-4497-86e2-23c4e1f72f98\" (UID: \"8aa052d2-11d5-4497-86e2-23c4e1f72f98\") " Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.975831 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8aa052d2-11d5-4497-86e2-23c4e1f72f98-combined-ca-bundle\") pod \"8aa052d2-11d5-4497-86e2-23c4e1f72f98\" (UID: \"8aa052d2-11d5-4497-86e2-23c4e1f72f98\") " Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.975896 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8aa052d2-11d5-4497-86e2-23c4e1f72f98-scripts\") pod \"8aa052d2-11d5-4497-86e2-23c4e1f72f98\" (UID: \"8aa052d2-11d5-4497-86e2-23c4e1f72f98\") " Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.975973 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4s94s\" (UniqueName: \"kubernetes.io/projected/8aa052d2-11d5-4497-86e2-23c4e1f72f98-kube-api-access-4s94s\") pod \"8aa052d2-11d5-4497-86e2-23c4e1f72f98\" (UID: \"8aa052d2-11d5-4497-86e2-23c4e1f72f98\") " Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.976500 4791 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/26012547-d6fc-44de-9ad8-413d40acfb88-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.983885 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8aa052d2-11d5-4497-86e2-23c4e1f72f98-kube-api-access-4s94s" (OuterVolumeSpecName: "kube-api-access-4s94s") pod "8aa052d2-11d5-4497-86e2-23c4e1f72f98" (UID: "8aa052d2-11d5-4497-86e2-23c4e1f72f98"). InnerVolumeSpecName "kube-api-access-4s94s". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:59:01 crc kubenswrapper[4791]: I0218 00:59:01.987087 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8aa052d2-11d5-4497-86e2-23c4e1f72f98-scripts" (OuterVolumeSpecName: "scripts") pod "8aa052d2-11d5-4497-86e2-23c4e1f72f98" (UID: "8aa052d2-11d5-4497-86e2-23c4e1f72f98"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.015116 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8aa052d2-11d5-4497-86e2-23c4e1f72f98-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8aa052d2-11d5-4497-86e2-23c4e1f72f98" (UID: "8aa052d2-11d5-4497-86e2-23c4e1f72f98"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.064407 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8aa052d2-11d5-4497-86e2-23c4e1f72f98-config-data" (OuterVolumeSpecName: "config-data") pod "8aa052d2-11d5-4497-86e2-23c4e1f72f98" (UID: "8aa052d2-11d5-4497-86e2-23c4e1f72f98"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.078326 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4s94s\" (UniqueName: \"kubernetes.io/projected/8aa052d2-11d5-4497-86e2-23c4e1f72f98-kube-api-access-4s94s\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.078364 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8aa052d2-11d5-4497-86e2-23c4e1f72f98-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.078376 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8aa052d2-11d5-4497-86e2-23c4e1f72f98-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.078388 4791 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8aa052d2-11d5-4497-86e2-23c4e1f72f98-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.139605 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Feb 18 00:59:02 crc kubenswrapper[4791]: E0218 00:59:02.140413 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26012547-d6fc-44de-9ad8-413d40acfb88" containerName="init" Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.140433 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="26012547-d6fc-44de-9ad8-413d40acfb88" containerName="init" Feb 18 00:59:02 crc kubenswrapper[4791]: E0218 00:59:02.140467 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d90b881e-3f26-4065-addf-b131f132fef9" containerName="nova-manage" Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.140475 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="d90b881e-3f26-4065-addf-b131f132fef9" containerName="nova-manage" Feb 18 00:59:02 crc kubenswrapper[4791]: E0218 00:59:02.140494 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8aa052d2-11d5-4497-86e2-23c4e1f72f98" containerName="nova-cell1-conductor-db-sync" Feb 18 
00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.140502 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="8aa052d2-11d5-4497-86e2-23c4e1f72f98" containerName="nova-cell1-conductor-db-sync" Feb 18 00:59:02 crc kubenswrapper[4791]: E0218 00:59:02.140521 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26012547-d6fc-44de-9ad8-413d40acfb88" containerName="dnsmasq-dns" Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.140529 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="26012547-d6fc-44de-9ad8-413d40acfb88" containerName="dnsmasq-dns" Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.140805 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="26012547-d6fc-44de-9ad8-413d40acfb88" containerName="dnsmasq-dns" Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.140846 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="d90b881e-3f26-4065-addf-b131f132fef9" containerName="nova-manage" Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.140865 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="8aa052d2-11d5-4497-86e2-23c4e1f72f98" containerName="nova-cell1-conductor-db-sync" Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.141905 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.158827 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.194394 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-tw6qw" event={"ID":"6fb61a28-f5c5-4c33-8706-4eb4a7160424","Type":"ContainerStarted","Data":"2ecf53d2211beec4940711c5b2dfa9bd19e0857d9d27dc4019b23acb88e376e2"} Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.194443 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-tw6qw" event={"ID":"6fb61a28-f5c5-4c33-8706-4eb4a7160424","Type":"ContainerStarted","Data":"78e19741f576cd1bd7be7db77078ca0f9b0ba4dafbfe28490b9f001a8d83a71d"} Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.201753 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0d1c-account-create-update-wj6st" event={"ID":"cc0535df-5769-4e87-bfbd-5bec07e3dda3","Type":"ContainerStarted","Data":"461c1e0ba7ac0aee40962224c0e5b878b45e335c985f36f6201801cd79b20d16"} Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.201795 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0d1c-account-create-update-wj6st" event={"ID":"cc0535df-5769-4e87-bfbd-5bec07e3dda3","Type":"ContainerStarted","Data":"3654f312a6d99053a9ba9f3f4205bfd1cefc52ecc4fa253f9c7db869e1a3329d"} Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.217107 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-n8x6z" Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.217039 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-n8x6z" event={"ID":"8aa052d2-11d5-4497-86e2-23c4e1f72f98","Type":"ContainerDied","Data":"cc00085509ed8e785401ad71b4180ca53f5b6d685c683a32df2447284e10383b"} Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.217199 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cc00085509ed8e785401ad71b4180ca53f5b6d685c683a32df2447284e10383b" Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.218190 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-create-tw6qw" podStartSLOduration=2.218150968 podStartE2EDuration="2.218150968s" podCreationTimestamp="2026-02-18 00:59:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:59:02.213595208 +0000 UTC m=+1483.781608538" watchObservedRunningTime="2026-02-18 00:59:02.218150968 +0000 UTC m=+1483.786164138" Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.225931 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1b6491f1-bdcf-4dee-8431-f40a42f31974","Type":"ContainerStarted","Data":"2f82488cbc129cf4cbe31cb94ccdf8a22f518c1ba381f0400f3e7babdd9f87fd"} Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.228336 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.235357 4791 generic.go:334] "Generic (PLEG): container finished" podID="c2b1b43a-a26e-4c38-8efa-055269e9e34f" containerID="4ecbc2ede1e03036be32813f12d2bb8c77529bf973e9c3be7510c7813eb11b83" exitCode=143 Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.235502 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c2b1b43a-a26e-4c38-8efa-055269e9e34f","Type":"ContainerDied","Data":"4ecbc2ede1e03036be32813f12d2bb8c77529bf973e9c3be7510c7813eb11b83"} Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.239288 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="bace8397-f744-4d2a-bd15-e32f27ef9af7" containerName="nova-scheduler-scheduler" containerID="cri-o://525d2635e2fb6dbf0eb57e36d74b048a1e5905e8080faf2048ee087de5b09015" gracePeriod=30 Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.239377 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-9skqg" event={"ID":"26012547-d6fc-44de-9ad8-413d40acfb88","Type":"ContainerDied","Data":"f0f96c39cd0023f46e3c1c818deef862f8e35d7176c1f2e77bc560252c96ca9d"} Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.239841 4791 scope.go:117] "RemoveContainer" containerID="b2d575b2f092ad1eebb2ee48db390d5784d8f14a4ffdc8c3defc64bb910245e7" Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.239498 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f6bc4c6c9-9skqg" Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.275832 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0d1c-account-create-update-wj6st" podStartSLOduration=2.275813812 podStartE2EDuration="2.275813812s" podCreationTimestamp="2026-02-18 00:59:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:59:02.229377526 +0000 UTC m=+1483.797390696" watchObservedRunningTime="2026-02-18 00:59:02.275813812 +0000 UTC m=+1483.843826982" Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.296661 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/686dcc60-b95f-43ca-bdb2-5045f8289bec-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"686dcc60-b95f-43ca-bdb2-5045f8289bec\") " pod="openstack/nova-cell1-conductor-0" Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.296700 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tgx4f\" (UniqueName: \"kubernetes.io/projected/686dcc60-b95f-43ca-bdb2-5045f8289bec-kube-api-access-tgx4f\") pod \"nova-cell1-conductor-0\" (UID: \"686dcc60-b95f-43ca-bdb2-5045f8289bec\") " pod="openstack/nova-cell1-conductor-0" Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.296778 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/686dcc60-b95f-43ca-bdb2-5045f8289bec-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"686dcc60-b95f-43ca-bdb2-5045f8289bec\") " pod="openstack/nova-cell1-conductor-0" Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.306178 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.701019666 podStartE2EDuration="6.30614772s" podCreationTimestamp="2026-02-18 00:58:56 +0000 UTC" firstStartedPulling="2026-02-18 00:58:58.043825861 +0000 UTC m=+1479.611839031" lastFinishedPulling="2026-02-18 00:59:01.648953915 +0000 UTC m=+1483.216967085" observedRunningTime="2026-02-18 00:59:02.262739267 +0000 UTC m=+1483.830752437" watchObservedRunningTime="2026-02-18 00:59:02.30614772 +0000 UTC m=+1483.874160890" Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.312764 4791 scope.go:117] "RemoveContainer" containerID="921841858db002c0a71f098c0f7f4cbc7bdba9e7203e36a601069520aefea89e" Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.326431 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-9skqg"] Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.338148 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-9skqg"] Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.398758 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/686dcc60-b95f-43ca-bdb2-5045f8289bec-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"686dcc60-b95f-43ca-bdb2-5045f8289bec\") " pod="openstack/nova-cell1-conductor-0" Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.398817 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tgx4f\" (UniqueName: 
\"kubernetes.io/projected/686dcc60-b95f-43ca-bdb2-5045f8289bec-kube-api-access-tgx4f\") pod \"nova-cell1-conductor-0\" (UID: \"686dcc60-b95f-43ca-bdb2-5045f8289bec\") " pod="openstack/nova-cell1-conductor-0" Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.398866 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/686dcc60-b95f-43ca-bdb2-5045f8289bec-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"686dcc60-b95f-43ca-bdb2-5045f8289bec\") " pod="openstack/nova-cell1-conductor-0" Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.403794 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/686dcc60-b95f-43ca-bdb2-5045f8289bec-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"686dcc60-b95f-43ca-bdb2-5045f8289bec\") " pod="openstack/nova-cell1-conductor-0" Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.404048 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/686dcc60-b95f-43ca-bdb2-5045f8289bec-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"686dcc60-b95f-43ca-bdb2-5045f8289bec\") " pod="openstack/nova-cell1-conductor-0" Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.417403 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tgx4f\" (UniqueName: \"kubernetes.io/projected/686dcc60-b95f-43ca-bdb2-5045f8289bec-kube-api-access-tgx4f\") pod \"nova-cell1-conductor-0\" (UID: \"686dcc60-b95f-43ca-bdb2-5045f8289bec\") " pod="openstack/nova-cell1-conductor-0" Feb 18 00:59:02 crc kubenswrapper[4791]: I0218 00:59:02.493838 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Feb 18 00:59:03 crc kubenswrapper[4791]: I0218 00:59:03.036342 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Feb 18 00:59:03 crc kubenswrapper[4791]: I0218 00:59:03.095828 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="26012547-d6fc-44de-9ad8-413d40acfb88" path="/var/lib/kubelet/pods/26012547-d6fc-44de-9ad8-413d40acfb88/volumes" Feb 18 00:59:03 crc kubenswrapper[4791]: I0218 00:59:03.269443 4791 generic.go:334] "Generic (PLEG): container finished" podID="6fb61a28-f5c5-4c33-8706-4eb4a7160424" containerID="2ecf53d2211beec4940711c5b2dfa9bd19e0857d9d27dc4019b23acb88e376e2" exitCode=0 Feb 18 00:59:03 crc kubenswrapper[4791]: I0218 00:59:03.269727 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-tw6qw" event={"ID":"6fb61a28-f5c5-4c33-8706-4eb4a7160424","Type":"ContainerDied","Data":"2ecf53d2211beec4940711c5b2dfa9bd19e0857d9d27dc4019b23acb88e376e2"} Feb 18 00:59:03 crc kubenswrapper[4791]: I0218 00:59:03.276587 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"686dcc60-b95f-43ca-bdb2-5045f8289bec","Type":"ContainerStarted","Data":"9a6309853fe9d0818b223238390948d4fde60ed9d68c209b62f581fa7e374d05"} Feb 18 00:59:03 crc kubenswrapper[4791]: I0218 00:59:03.289618 4791 generic.go:334] "Generic (PLEG): container finished" podID="cc0535df-5769-4e87-bfbd-5bec07e3dda3" containerID="461c1e0ba7ac0aee40962224c0e5b878b45e335c985f36f6201801cd79b20d16" exitCode=0 Feb 18 00:59:03 crc kubenswrapper[4791]: I0218 00:59:03.291398 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0d1c-account-create-update-wj6st" event={"ID":"cc0535df-5769-4e87-bfbd-5bec07e3dda3","Type":"ContainerDied","Data":"461c1e0ba7ac0aee40962224c0e5b878b45e335c985f36f6201801cd79b20d16"} Feb 18 00:59:03 crc kubenswrapper[4791]: I0218 00:59:03.835899 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Feb 18 00:59:03 crc kubenswrapper[4791]: I0218 00:59:03.980775 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bace8397-f744-4d2a-bd15-e32f27ef9af7-combined-ca-bundle\") pod \"bace8397-f744-4d2a-bd15-e32f27ef9af7\" (UID: \"bace8397-f744-4d2a-bd15-e32f27ef9af7\") " Feb 18 00:59:03 crc kubenswrapper[4791]: I0218 00:59:03.981079 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bace8397-f744-4d2a-bd15-e32f27ef9af7-config-data\") pod \"bace8397-f744-4d2a-bd15-e32f27ef9af7\" (UID: \"bace8397-f744-4d2a-bd15-e32f27ef9af7\") " Feb 18 00:59:03 crc kubenswrapper[4791]: I0218 00:59:03.981277 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f45vl\" (UniqueName: \"kubernetes.io/projected/bace8397-f744-4d2a-bd15-e32f27ef9af7-kube-api-access-f45vl\") pod \"bace8397-f744-4d2a-bd15-e32f27ef9af7\" (UID: \"bace8397-f744-4d2a-bd15-e32f27ef9af7\") " Feb 18 00:59:04 crc kubenswrapper[4791]: I0218 00:59:04.027298 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bace8397-f744-4d2a-bd15-e32f27ef9af7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bace8397-f744-4d2a-bd15-e32f27ef9af7" (UID: "bace8397-f744-4d2a-bd15-e32f27ef9af7"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:59:04 crc kubenswrapper[4791]: I0218 00:59:04.027474 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bace8397-f744-4d2a-bd15-e32f27ef9af7-kube-api-access-f45vl" (OuterVolumeSpecName: "kube-api-access-f45vl") pod "bace8397-f744-4d2a-bd15-e32f27ef9af7" (UID: "bace8397-f744-4d2a-bd15-e32f27ef9af7"). InnerVolumeSpecName "kube-api-access-f45vl". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:59:04 crc kubenswrapper[4791]: I0218 00:59:04.031279 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bace8397-f744-4d2a-bd15-e32f27ef9af7-config-data" (OuterVolumeSpecName: "config-data") pod "bace8397-f744-4d2a-bd15-e32f27ef9af7" (UID: "bace8397-f744-4d2a-bd15-e32f27ef9af7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:59:04 crc kubenswrapper[4791]: I0218 00:59:04.084860 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f45vl\" (UniqueName: \"kubernetes.io/projected/bace8397-f744-4d2a-bd15-e32f27ef9af7-kube-api-access-f45vl\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:04 crc kubenswrapper[4791]: I0218 00:59:04.085123 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bace8397-f744-4d2a-bd15-e32f27ef9af7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:04 crc kubenswrapper[4791]: I0218 00:59:04.085203 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bace8397-f744-4d2a-bd15-e32f27ef9af7-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:04 crc kubenswrapper[4791]: I0218 00:59:04.316621 4791 generic.go:334] "Generic (PLEG): container finished" podID="bace8397-f744-4d2a-bd15-e32f27ef9af7" containerID="525d2635e2fb6dbf0eb57e36d74b048a1e5905e8080faf2048ee087de5b09015" exitCode=0 Feb 18 00:59:04 crc kubenswrapper[4791]: I0218 00:59:04.316697 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"bace8397-f744-4d2a-bd15-e32f27ef9af7","Type":"ContainerDied","Data":"525d2635e2fb6dbf0eb57e36d74b048a1e5905e8080faf2048ee087de5b09015"} Feb 18 00:59:04 crc kubenswrapper[4791]: I0218 00:59:04.316734 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"bace8397-f744-4d2a-bd15-e32f27ef9af7","Type":"ContainerDied","Data":"bd80ae2ae1a1bb4bed3c3064ccde4f3e977a4d40fc0f22ed7c6f2fc02388d6f3"} Feb 18 00:59:04 crc kubenswrapper[4791]: I0218 00:59:04.316755 4791 scope.go:117] "RemoveContainer" containerID="525d2635e2fb6dbf0eb57e36d74b048a1e5905e8080faf2048ee087de5b09015" Feb 18 00:59:04 crc kubenswrapper[4791]: I0218 00:59:04.317139 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Feb 18 00:59:04 crc kubenswrapper[4791]: I0218 00:59:04.322222 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"686dcc60-b95f-43ca-bdb2-5045f8289bec","Type":"ContainerStarted","Data":"5ed760ef7b5425b2980d904a32f5566c8ad61306e431f41cd7de94dbc2447c95"} Feb 18 00:59:04 crc kubenswrapper[4791]: I0218 00:59:04.322776 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Feb 18 00:59:04 crc kubenswrapper[4791]: I0218 00:59:04.341691 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.341673448 podStartE2EDuration="2.341673448s" podCreationTimestamp="2026-02-18 00:59:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:59:04.33914719 +0000 UTC m=+1485.907160360" watchObservedRunningTime="2026-02-18 00:59:04.341673448 +0000 UTC m=+1485.909686618" Feb 18 00:59:04 crc kubenswrapper[4791]: I0218 00:59:04.351423 4791 scope.go:117] "RemoveContainer" containerID="525d2635e2fb6dbf0eb57e36d74b048a1e5905e8080faf2048ee087de5b09015" Feb 18 00:59:04 crc kubenswrapper[4791]: E0218 00:59:04.353663 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"525d2635e2fb6dbf0eb57e36d74b048a1e5905e8080faf2048ee087de5b09015\": container with ID starting with 525d2635e2fb6dbf0eb57e36d74b048a1e5905e8080faf2048ee087de5b09015 not found: ID does not exist" containerID="525d2635e2fb6dbf0eb57e36d74b048a1e5905e8080faf2048ee087de5b09015" Feb 18 00:59:04 crc kubenswrapper[4791]: I0218 00:59:04.354023 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"525d2635e2fb6dbf0eb57e36d74b048a1e5905e8080faf2048ee087de5b09015"} err="failed to get container status \"525d2635e2fb6dbf0eb57e36d74b048a1e5905e8080faf2048ee087de5b09015\": rpc error: code = NotFound desc = could not find container \"525d2635e2fb6dbf0eb57e36d74b048a1e5905e8080faf2048ee087de5b09015\": container with ID starting with 525d2635e2fb6dbf0eb57e36d74b048a1e5905e8080faf2048ee087de5b09015 not found: ID does not exist" Feb 18 00:59:04 crc kubenswrapper[4791]: I0218 00:59:04.394384 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Feb 18 00:59:04 crc kubenswrapper[4791]: I0218 00:59:04.409912 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Feb 18 00:59:04 crc kubenswrapper[4791]: I0218 00:59:04.423311 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Feb 18 00:59:04 crc kubenswrapper[4791]: E0218 00:59:04.423869 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bace8397-f744-4d2a-bd15-e32f27ef9af7" containerName="nova-scheduler-scheduler" Feb 18 00:59:04 crc kubenswrapper[4791]: I0218 00:59:04.423889 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="bace8397-f744-4d2a-bd15-e32f27ef9af7" containerName="nova-scheduler-scheduler" Feb 18 00:59:04 crc kubenswrapper[4791]: I0218 00:59:04.424170 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="bace8397-f744-4d2a-bd15-e32f27ef9af7" containerName="nova-scheduler-scheduler" Feb 18 00:59:04 crc kubenswrapper[4791]: I0218 00:59:04.425011 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Feb 18 00:59:04 crc kubenswrapper[4791]: I0218 00:59:04.429377 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Feb 18 00:59:04 crc kubenswrapper[4791]: I0218 00:59:04.448442 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Feb 18 00:59:04 crc kubenswrapper[4791]: I0218 00:59:04.495820 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bzscq\" (UniqueName: \"kubernetes.io/projected/11c5a6bf-395f-4b92-a6a3-99176c573b6b-kube-api-access-bzscq\") pod \"nova-scheduler-0\" (UID: \"11c5a6bf-395f-4b92-a6a3-99176c573b6b\") " pod="openstack/nova-scheduler-0" Feb 18 00:59:04 crc kubenswrapper[4791]: I0218 00:59:04.495916 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/11c5a6bf-395f-4b92-a6a3-99176c573b6b-config-data\") pod \"nova-scheduler-0\" (UID: \"11c5a6bf-395f-4b92-a6a3-99176c573b6b\") " pod="openstack/nova-scheduler-0" Feb 18 00:59:04 crc kubenswrapper[4791]: I0218 00:59:04.496038 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/11c5a6bf-395f-4b92-a6a3-99176c573b6b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"11c5a6bf-395f-4b92-a6a3-99176c573b6b\") " pod="openstack/nova-scheduler-0" Feb 18 00:59:04 crc kubenswrapper[4791]: I0218 00:59:04.599866 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/11c5a6bf-395f-4b92-a6a3-99176c573b6b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"11c5a6bf-395f-4b92-a6a3-99176c573b6b\") " pod="openstack/nova-scheduler-0" Feb 18 00:59:04 crc kubenswrapper[4791]: I0218 00:59:04.599997 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bzscq\" (UniqueName: \"kubernetes.io/projected/11c5a6bf-395f-4b92-a6a3-99176c573b6b-kube-api-access-bzscq\") pod \"nova-scheduler-0\" (UID: \"11c5a6bf-395f-4b92-a6a3-99176c573b6b\") " pod="openstack/nova-scheduler-0" Feb 18 00:59:04 crc kubenswrapper[4791]: I0218 00:59:04.600076 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/11c5a6bf-395f-4b92-a6a3-99176c573b6b-config-data\") pod \"nova-scheduler-0\" (UID: \"11c5a6bf-395f-4b92-a6a3-99176c573b6b\") " pod="openstack/nova-scheduler-0" Feb 18 00:59:04 crc kubenswrapper[4791]: I0218 00:59:04.605388 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/11c5a6bf-395f-4b92-a6a3-99176c573b6b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"11c5a6bf-395f-4b92-a6a3-99176c573b6b\") " pod="openstack/nova-scheduler-0" Feb 18 00:59:04 crc kubenswrapper[4791]: I0218 00:59:04.611823 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/11c5a6bf-395f-4b92-a6a3-99176c573b6b-config-data\") pod \"nova-scheduler-0\" (UID: \"11c5a6bf-395f-4b92-a6a3-99176c573b6b\") " pod="openstack/nova-scheduler-0" Feb 18 00:59:04 crc kubenswrapper[4791]: I0218 00:59:04.627989 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bzscq\" (UniqueName: 
\"kubernetes.io/projected/11c5a6bf-395f-4b92-a6a3-99176c573b6b-kube-api-access-bzscq\") pod \"nova-scheduler-0\" (UID: \"11c5a6bf-395f-4b92-a6a3-99176c573b6b\") " pod="openstack/nova-scheduler-0" Feb 18 00:59:04 crc kubenswrapper[4791]: I0218 00:59:04.760648 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Feb 18 00:59:04 crc kubenswrapper[4791]: I0218 00:59:04.976461 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-tw6qw" Feb 18 00:59:04 crc kubenswrapper[4791]: I0218 00:59:04.985369 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0d1c-account-create-update-wj6st" Feb 18 00:59:05 crc kubenswrapper[4791]: I0218 00:59:05.107911 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bace8397-f744-4d2a-bd15-e32f27ef9af7" path="/var/lib/kubelet/pods/bace8397-f744-4d2a-bd15-e32f27ef9af7/volumes" Feb 18 00:59:05 crc kubenswrapper[4791]: I0218 00:59:05.114524 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc0535df-5769-4e87-bfbd-5bec07e3dda3-operator-scripts\") pod \"cc0535df-5769-4e87-bfbd-5bec07e3dda3\" (UID: \"cc0535df-5769-4e87-bfbd-5bec07e3dda3\") " Feb 18 00:59:05 crc kubenswrapper[4791]: I0218 00:59:05.114624 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9kj9k\" (UniqueName: \"kubernetes.io/projected/cc0535df-5769-4e87-bfbd-5bec07e3dda3-kube-api-access-9kj9k\") pod \"cc0535df-5769-4e87-bfbd-5bec07e3dda3\" (UID: \"cc0535df-5769-4e87-bfbd-5bec07e3dda3\") " Feb 18 00:59:05 crc kubenswrapper[4791]: I0218 00:59:05.114651 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6fb61a28-f5c5-4c33-8706-4eb4a7160424-operator-scripts\") pod \"6fb61a28-f5c5-4c33-8706-4eb4a7160424\" (UID: \"6fb61a28-f5c5-4c33-8706-4eb4a7160424\") " Feb 18 00:59:05 crc kubenswrapper[4791]: I0218 00:59:05.114919 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-trqw4\" (UniqueName: \"kubernetes.io/projected/6fb61a28-f5c5-4c33-8706-4eb4a7160424-kube-api-access-trqw4\") pod \"6fb61a28-f5c5-4c33-8706-4eb4a7160424\" (UID: \"6fb61a28-f5c5-4c33-8706-4eb4a7160424\") " Feb 18 00:59:05 crc kubenswrapper[4791]: I0218 00:59:05.115508 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc0535df-5769-4e87-bfbd-5bec07e3dda3-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "cc0535df-5769-4e87-bfbd-5bec07e3dda3" (UID: "cc0535df-5769-4e87-bfbd-5bec07e3dda3"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:59:05 crc kubenswrapper[4791]: I0218 00:59:05.118676 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6fb61a28-f5c5-4c33-8706-4eb4a7160424-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6fb61a28-f5c5-4c33-8706-4eb4a7160424" (UID: "6fb61a28-f5c5-4c33-8706-4eb4a7160424"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:59:05 crc kubenswrapper[4791]: I0218 00:59:05.125389 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc0535df-5769-4e87-bfbd-5bec07e3dda3-kube-api-access-9kj9k" (OuterVolumeSpecName: "kube-api-access-9kj9k") pod "cc0535df-5769-4e87-bfbd-5bec07e3dda3" (UID: "cc0535df-5769-4e87-bfbd-5bec07e3dda3"). InnerVolumeSpecName "kube-api-access-9kj9k". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:59:05 crc kubenswrapper[4791]: I0218 00:59:05.126539 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6fb61a28-f5c5-4c33-8706-4eb4a7160424-kube-api-access-trqw4" (OuterVolumeSpecName: "kube-api-access-trqw4") pod "6fb61a28-f5c5-4c33-8706-4eb4a7160424" (UID: "6fb61a28-f5c5-4c33-8706-4eb4a7160424"). InnerVolumeSpecName "kube-api-access-trqw4". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:59:05 crc kubenswrapper[4791]: I0218 00:59:05.219243 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-trqw4\" (UniqueName: \"kubernetes.io/projected/6fb61a28-f5c5-4c33-8706-4eb4a7160424-kube-api-access-trqw4\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:05 crc kubenswrapper[4791]: I0218 00:59:05.219290 4791 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc0535df-5769-4e87-bfbd-5bec07e3dda3-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:05 crc kubenswrapper[4791]: I0218 00:59:05.219309 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9kj9k\" (UniqueName: \"kubernetes.io/projected/cc0535df-5769-4e87-bfbd-5bec07e3dda3-kube-api-access-9kj9k\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:05 crc kubenswrapper[4791]: I0218 00:59:05.219331 4791 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6fb61a28-f5c5-4c33-8706-4eb4a7160424-operator-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:05 crc kubenswrapper[4791]: I0218 00:59:05.337226 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0d1c-account-create-update-wj6st" event={"ID":"cc0535df-5769-4e87-bfbd-5bec07e3dda3","Type":"ContainerDied","Data":"3654f312a6d99053a9ba9f3f4205bfd1cefc52ecc4fa253f9c7db869e1a3329d"} Feb 18 00:59:05 crc kubenswrapper[4791]: I0218 00:59:05.337516 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3654f312a6d99053a9ba9f3f4205bfd1cefc52ecc4fa253f9c7db869e1a3329d" Feb 18 00:59:05 crc kubenswrapper[4791]: I0218 00:59:05.337237 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0d1c-account-create-update-wj6st" Feb 18 00:59:05 crc kubenswrapper[4791]: I0218 00:59:05.340705 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-tw6qw" event={"ID":"6fb61a28-f5c5-4c33-8706-4eb4a7160424","Type":"ContainerDied","Data":"78e19741f576cd1bd7be7db77078ca0f9b0ba4dafbfe28490b9f001a8d83a71d"} Feb 18 00:59:05 crc kubenswrapper[4791]: I0218 00:59:05.340742 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="78e19741f576cd1bd7be7db77078ca0f9b0ba4dafbfe28490b9f001a8d83a71d" Feb 18 00:59:05 crc kubenswrapper[4791]: I0218 00:59:05.340796 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-create-tw6qw" Feb 18 00:59:05 crc kubenswrapper[4791]: I0218 00:59:05.350050 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Feb 18 00:59:06 crc kubenswrapper[4791]: I0218 00:59:06.090233 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-f6bc4c6c9-9skqg" podUID="26012547-d6fc-44de-9ad8-413d40acfb88" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.220:5353: i/o timeout" Feb 18 00:59:06 crc kubenswrapper[4791]: I0218 00:59:06.353302 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"11c5a6bf-395f-4b92-a6a3-99176c573b6b","Type":"ContainerStarted","Data":"efa7654cdc48650d11538aee120a9e83332b77c1c5d229f9e615e268aacfb5b0"} Feb 18 00:59:06 crc kubenswrapper[4791]: I0218 00:59:06.353347 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"11c5a6bf-395f-4b92-a6a3-99176c573b6b","Type":"ContainerStarted","Data":"ee16799ecf56a8530fee223be8d79d0f4c103d8a0ef1d93114f28779e938611e"} Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.290664 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.313938 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=4.313919266 podStartE2EDuration="4.313919266s" podCreationTimestamp="2026-02-18 00:59:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:59:06.378257137 +0000 UTC m=+1487.946270317" watchObservedRunningTime="2026-02-18 00:59:08.313919266 +0000 UTC m=+1489.881932436" Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.386682 4791 generic.go:334] "Generic (PLEG): container finished" podID="c2b1b43a-a26e-4c38-8efa-055269e9e34f" containerID="f402802f065138b9b06119ca78f052b4c31518d7c4670651045a82631840955e" exitCode=0 Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.386727 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c2b1b43a-a26e-4c38-8efa-055269e9e34f","Type":"ContainerDied","Data":"f402802f065138b9b06119ca78f052b4c31518d7c4670651045a82631840955e"} Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.386752 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c2b1b43a-a26e-4c38-8efa-055269e9e34f","Type":"ContainerDied","Data":"002aa758a16710b0543b05eb1d84d4937775376e3f85e82f0a5aab5c1dd31299"} Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.386768 4791 scope.go:117] "RemoveContainer" containerID="f402802f065138b9b06119ca78f052b4c31518d7c4670651045a82631840955e" Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.386838 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.411181 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2b1b43a-a26e-4c38-8efa-055269e9e34f-combined-ca-bundle\") pod \"c2b1b43a-a26e-4c38-8efa-055269e9e34f\" (UID: \"c2b1b43a-a26e-4c38-8efa-055269e9e34f\") " Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.411396 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wjwmc\" (UniqueName: \"kubernetes.io/projected/c2b1b43a-a26e-4c38-8efa-055269e9e34f-kube-api-access-wjwmc\") pod \"c2b1b43a-a26e-4c38-8efa-055269e9e34f\" (UID: \"c2b1b43a-a26e-4c38-8efa-055269e9e34f\") " Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.411451 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2b1b43a-a26e-4c38-8efa-055269e9e34f-logs\") pod \"c2b1b43a-a26e-4c38-8efa-055269e9e34f\" (UID: \"c2b1b43a-a26e-4c38-8efa-055269e9e34f\") " Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.411541 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2b1b43a-a26e-4c38-8efa-055269e9e34f-config-data\") pod \"c2b1b43a-a26e-4c38-8efa-055269e9e34f\" (UID: \"c2b1b43a-a26e-4c38-8efa-055269e9e34f\") " Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.411899 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c2b1b43a-a26e-4c38-8efa-055269e9e34f-logs" (OuterVolumeSpecName: "logs") pod "c2b1b43a-a26e-4c38-8efa-055269e9e34f" (UID: "c2b1b43a-a26e-4c38-8efa-055269e9e34f"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.412638 4791 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2b1b43a-a26e-4c38-8efa-055269e9e34f-logs\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.418609 4791 scope.go:117] "RemoveContainer" containerID="4ecbc2ede1e03036be32813f12d2bb8c77529bf973e9c3be7510c7813eb11b83" Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.427999 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2b1b43a-a26e-4c38-8efa-055269e9e34f-kube-api-access-wjwmc" (OuterVolumeSpecName: "kube-api-access-wjwmc") pod "c2b1b43a-a26e-4c38-8efa-055269e9e34f" (UID: "c2b1b43a-a26e-4c38-8efa-055269e9e34f"). InnerVolumeSpecName "kube-api-access-wjwmc". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.456274 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2b1b43a-a26e-4c38-8efa-055269e9e34f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c2b1b43a-a26e-4c38-8efa-055269e9e34f" (UID: "c2b1b43a-a26e-4c38-8efa-055269e9e34f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.461314 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2b1b43a-a26e-4c38-8efa-055269e9e34f-config-data" (OuterVolumeSpecName: "config-data") pod "c2b1b43a-a26e-4c38-8efa-055269e9e34f" (UID: "c2b1b43a-a26e-4c38-8efa-055269e9e34f"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.514796 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wjwmc\" (UniqueName: \"kubernetes.io/projected/c2b1b43a-a26e-4c38-8efa-055269e9e34f-kube-api-access-wjwmc\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.515114 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2b1b43a-a26e-4c38-8efa-055269e9e34f-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.515125 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2b1b43a-a26e-4c38-8efa-055269e9e34f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.556741 4791 scope.go:117] "RemoveContainer" containerID="f402802f065138b9b06119ca78f052b4c31518d7c4670651045a82631840955e" Feb 18 00:59:08 crc kubenswrapper[4791]: E0218 00:59:08.557512 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f402802f065138b9b06119ca78f052b4c31518d7c4670651045a82631840955e\": container with ID starting with f402802f065138b9b06119ca78f052b4c31518d7c4670651045a82631840955e not found: ID does not exist" containerID="f402802f065138b9b06119ca78f052b4c31518d7c4670651045a82631840955e" Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.557562 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f402802f065138b9b06119ca78f052b4c31518d7c4670651045a82631840955e"} err="failed to get container status \"f402802f065138b9b06119ca78f052b4c31518d7c4670651045a82631840955e\": rpc error: code = NotFound desc = could not find container \"f402802f065138b9b06119ca78f052b4c31518d7c4670651045a82631840955e\": container with ID starting with f402802f065138b9b06119ca78f052b4c31518d7c4670651045a82631840955e not found: ID does not exist" Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.557610 4791 scope.go:117] "RemoveContainer" containerID="4ecbc2ede1e03036be32813f12d2bb8c77529bf973e9c3be7510c7813eb11b83" Feb 18 00:59:08 crc kubenswrapper[4791]: E0218 00:59:08.558018 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ecbc2ede1e03036be32813f12d2bb8c77529bf973e9c3be7510c7813eb11b83\": container with ID starting with 4ecbc2ede1e03036be32813f12d2bb8c77529bf973e9c3be7510c7813eb11b83 not found: ID does not exist" containerID="4ecbc2ede1e03036be32813f12d2bb8c77529bf973e9c3be7510c7813eb11b83" Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.558044 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ecbc2ede1e03036be32813f12d2bb8c77529bf973e9c3be7510c7813eb11b83"} err="failed to get container status \"4ecbc2ede1e03036be32813f12d2bb8c77529bf973e9c3be7510c7813eb11b83\": rpc error: code = NotFound desc = could not find container \"4ecbc2ede1e03036be32813f12d2bb8c77529bf973e9c3be7510c7813eb11b83\": container with ID starting with 4ecbc2ede1e03036be32813f12d2bb8c77529bf973e9c3be7510c7813eb11b83 not found: ID does not exist" Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.728122 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Feb 18 00:59:08 crc 
kubenswrapper[4791]: I0218 00:59:08.739241 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.752309 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Feb 18 00:59:08 crc kubenswrapper[4791]: E0218 00:59:08.752754 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2b1b43a-a26e-4c38-8efa-055269e9e34f" containerName="nova-api-api" Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.752770 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2b1b43a-a26e-4c38-8efa-055269e9e34f" containerName="nova-api-api" Feb 18 00:59:08 crc kubenswrapper[4791]: E0218 00:59:08.752796 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2b1b43a-a26e-4c38-8efa-055269e9e34f" containerName="nova-api-log" Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.752803 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2b1b43a-a26e-4c38-8efa-055269e9e34f" containerName="nova-api-log" Feb 18 00:59:08 crc kubenswrapper[4791]: E0218 00:59:08.752825 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc0535df-5769-4e87-bfbd-5bec07e3dda3" containerName="mariadb-account-create-update" Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.752831 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc0535df-5769-4e87-bfbd-5bec07e3dda3" containerName="mariadb-account-create-update" Feb 18 00:59:08 crc kubenswrapper[4791]: E0218 00:59:08.752848 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fb61a28-f5c5-4c33-8706-4eb4a7160424" containerName="mariadb-database-create" Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.752854 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fb61a28-f5c5-4c33-8706-4eb4a7160424" containerName="mariadb-database-create" Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.753054 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fb61a28-f5c5-4c33-8706-4eb4a7160424" containerName="mariadb-database-create" Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.753067 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2b1b43a-a26e-4c38-8efa-055269e9e34f" containerName="nova-api-api" Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.753080 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc0535df-5769-4e87-bfbd-5bec07e3dda3" containerName="mariadb-account-create-update" Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.753091 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2b1b43a-a26e-4c38-8efa-055269e9e34f" containerName="nova-api-log" Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.754311 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.757889 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.779097 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.922167 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6rmc\" (UniqueName: \"kubernetes.io/projected/07a4538d-b54c-45b5-8455-5d7d0a0144b8-kube-api-access-g6rmc\") pod \"nova-api-0\" (UID: \"07a4538d-b54c-45b5-8455-5d7d0a0144b8\") " pod="openstack/nova-api-0" Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.922240 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07a4538d-b54c-45b5-8455-5d7d0a0144b8-logs\") pod \"nova-api-0\" (UID: \"07a4538d-b54c-45b5-8455-5d7d0a0144b8\") " pod="openstack/nova-api-0" Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.922273 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07a4538d-b54c-45b5-8455-5d7d0a0144b8-config-data\") pod \"nova-api-0\" (UID: \"07a4538d-b54c-45b5-8455-5d7d0a0144b8\") " pod="openstack/nova-api-0" Feb 18 00:59:08 crc kubenswrapper[4791]: I0218 00:59:08.922305 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07a4538d-b54c-45b5-8455-5d7d0a0144b8-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"07a4538d-b54c-45b5-8455-5d7d0a0144b8\") " pod="openstack/nova-api-0" Feb 18 00:59:08 crc kubenswrapper[4791]: E0218 00:59:08.952094 4791 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc2b1b43a_a26e_4c38_8efa_055269e9e34f.slice/crio-002aa758a16710b0543b05eb1d84d4937775376e3f85e82f0a5aab5c1dd31299\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc2b1b43a_a26e_4c38_8efa_055269e9e34f.slice\": RecentStats: unable to find data in memory cache]" Feb 18 00:59:09 crc kubenswrapper[4791]: I0218 00:59:09.024582 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g6rmc\" (UniqueName: \"kubernetes.io/projected/07a4538d-b54c-45b5-8455-5d7d0a0144b8-kube-api-access-g6rmc\") pod \"nova-api-0\" (UID: \"07a4538d-b54c-45b5-8455-5d7d0a0144b8\") " pod="openstack/nova-api-0" Feb 18 00:59:09 crc kubenswrapper[4791]: I0218 00:59:09.024639 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07a4538d-b54c-45b5-8455-5d7d0a0144b8-logs\") pod \"nova-api-0\" (UID: \"07a4538d-b54c-45b5-8455-5d7d0a0144b8\") " pod="openstack/nova-api-0" Feb 18 00:59:09 crc kubenswrapper[4791]: I0218 00:59:09.024663 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07a4538d-b54c-45b5-8455-5d7d0a0144b8-config-data\") pod \"nova-api-0\" (UID: \"07a4538d-b54c-45b5-8455-5d7d0a0144b8\") " pod="openstack/nova-api-0" Feb 18 00:59:09 crc kubenswrapper[4791]: I0218 00:59:09.024702 4791 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07a4538d-b54c-45b5-8455-5d7d0a0144b8-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"07a4538d-b54c-45b5-8455-5d7d0a0144b8\") " pod="openstack/nova-api-0" Feb 18 00:59:09 crc kubenswrapper[4791]: I0218 00:59:09.025140 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07a4538d-b54c-45b5-8455-5d7d0a0144b8-logs\") pod \"nova-api-0\" (UID: \"07a4538d-b54c-45b5-8455-5d7d0a0144b8\") " pod="openstack/nova-api-0" Feb 18 00:59:09 crc kubenswrapper[4791]: I0218 00:59:09.029322 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07a4538d-b54c-45b5-8455-5d7d0a0144b8-config-data\") pod \"nova-api-0\" (UID: \"07a4538d-b54c-45b5-8455-5d7d0a0144b8\") " pod="openstack/nova-api-0" Feb 18 00:59:09 crc kubenswrapper[4791]: I0218 00:59:09.029336 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07a4538d-b54c-45b5-8455-5d7d0a0144b8-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"07a4538d-b54c-45b5-8455-5d7d0a0144b8\") " pod="openstack/nova-api-0" Feb 18 00:59:09 crc kubenswrapper[4791]: I0218 00:59:09.042725 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g6rmc\" (UniqueName: \"kubernetes.io/projected/07a4538d-b54c-45b5-8455-5d7d0a0144b8-kube-api-access-g6rmc\") pod \"nova-api-0\" (UID: \"07a4538d-b54c-45b5-8455-5d7d0a0144b8\") " pod="openstack/nova-api-0" Feb 18 00:59:09 crc kubenswrapper[4791]: I0218 00:59:09.073617 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c2b1b43a-a26e-4c38-8efa-055269e9e34f" path="/var/lib/kubelet/pods/c2b1b43a-a26e-4c38-8efa-055269e9e34f/volumes" Feb 18 00:59:09 crc kubenswrapper[4791]: I0218 00:59:09.075798 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Feb 18 00:59:09 crc kubenswrapper[4791]: I0218 00:59:09.600902 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Feb 18 00:59:09 crc kubenswrapper[4791]: I0218 00:59:09.764129 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Feb 18 00:59:10 crc kubenswrapper[4791]: I0218 00:59:10.413390 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"07a4538d-b54c-45b5-8455-5d7d0a0144b8","Type":"ContainerStarted","Data":"3a78929b084d217f6863356a5f2feb6f084607971a13a0784994a7607811fc66"} Feb 18 00:59:10 crc kubenswrapper[4791]: I0218 00:59:10.413806 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"07a4538d-b54c-45b5-8455-5d7d0a0144b8","Type":"ContainerStarted","Data":"6d5f413397c33456907645fdf5056da7a7acef430cbe138126a867f77095e019"} Feb 18 00:59:10 crc kubenswrapper[4791]: I0218 00:59:10.413828 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"07a4538d-b54c-45b5-8455-5d7d0a0144b8","Type":"ContainerStarted","Data":"5b3455353264e4e9807a77c72e3f16539188710dce69f9732c4c1da26632f3a5"} Feb 18 00:59:10 crc kubenswrapper[4791]: I0218 00:59:10.492565 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.492538719 podStartE2EDuration="2.492538719s" podCreationTimestamp="2026-02-18 00:59:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:59:10.442444229 +0000 UTC m=+1492.010457399" watchObservedRunningTime="2026-02-18 00:59:10.492538719 +0000 UTC m=+1492.060551889" Feb 18 00:59:10 crc kubenswrapper[4791]: I0218 00:59:10.504089 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-sync-7bqdn"] Feb 18 00:59:10 crc kubenswrapper[4791]: I0218 00:59:10.505846 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-7bqdn" Feb 18 00:59:10 crc kubenswrapper[4791]: I0218 00:59:10.509438 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-n4w9p" Feb 18 00:59:10 crc kubenswrapper[4791]: I0218 00:59:10.509468 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Feb 18 00:59:10 crc kubenswrapper[4791]: I0218 00:59:10.509490 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Feb 18 00:59:10 crc kubenswrapper[4791]: I0218 00:59:10.509922 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Feb 18 00:59:10 crc kubenswrapper[4791]: I0218 00:59:10.519922 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-7bqdn"] Feb 18 00:59:10 crc kubenswrapper[4791]: I0218 00:59:10.676263 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/34c9d979-fc5d-4c7d-951c-ff13f8814802-scripts\") pod \"aodh-db-sync-7bqdn\" (UID: \"34c9d979-fc5d-4c7d-951c-ff13f8814802\") " pod="openstack/aodh-db-sync-7bqdn" Feb 18 00:59:10 crc kubenswrapper[4791]: I0218 00:59:10.676541 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34c9d979-fc5d-4c7d-951c-ff13f8814802-combined-ca-bundle\") pod \"aodh-db-sync-7bqdn\" (UID: \"34c9d979-fc5d-4c7d-951c-ff13f8814802\") " pod="openstack/aodh-db-sync-7bqdn" Feb 18 00:59:10 crc kubenswrapper[4791]: I0218 00:59:10.676562 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nrgs5\" (UniqueName: \"kubernetes.io/projected/34c9d979-fc5d-4c7d-951c-ff13f8814802-kube-api-access-nrgs5\") pod \"aodh-db-sync-7bqdn\" (UID: \"34c9d979-fc5d-4c7d-951c-ff13f8814802\") " pod="openstack/aodh-db-sync-7bqdn" Feb 18 00:59:10 crc kubenswrapper[4791]: I0218 00:59:10.676836 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34c9d979-fc5d-4c7d-951c-ff13f8814802-config-data\") pod \"aodh-db-sync-7bqdn\" (UID: \"34c9d979-fc5d-4c7d-951c-ff13f8814802\") " pod="openstack/aodh-db-sync-7bqdn" Feb 18 00:59:10 crc kubenswrapper[4791]: I0218 00:59:10.779793 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/34c9d979-fc5d-4c7d-951c-ff13f8814802-scripts\") pod \"aodh-db-sync-7bqdn\" (UID: \"34c9d979-fc5d-4c7d-951c-ff13f8814802\") " pod="openstack/aodh-db-sync-7bqdn" Feb 18 00:59:10 crc kubenswrapper[4791]: I0218 00:59:10.779866 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34c9d979-fc5d-4c7d-951c-ff13f8814802-combined-ca-bundle\") pod \"aodh-db-sync-7bqdn\" (UID: \"34c9d979-fc5d-4c7d-951c-ff13f8814802\") " pod="openstack/aodh-db-sync-7bqdn" Feb 18 00:59:10 crc kubenswrapper[4791]: I0218 00:59:10.779906 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nrgs5\" (UniqueName: \"kubernetes.io/projected/34c9d979-fc5d-4c7d-951c-ff13f8814802-kube-api-access-nrgs5\") pod \"aodh-db-sync-7bqdn\" (UID: \"34c9d979-fc5d-4c7d-951c-ff13f8814802\") " pod="openstack/aodh-db-sync-7bqdn" Feb 18 00:59:10 crc 
kubenswrapper[4791]: I0218 00:59:10.780090 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34c9d979-fc5d-4c7d-951c-ff13f8814802-config-data\") pod \"aodh-db-sync-7bqdn\" (UID: \"34c9d979-fc5d-4c7d-951c-ff13f8814802\") " pod="openstack/aodh-db-sync-7bqdn" Feb 18 00:59:10 crc kubenswrapper[4791]: I0218 00:59:10.786571 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/34c9d979-fc5d-4c7d-951c-ff13f8814802-scripts\") pod \"aodh-db-sync-7bqdn\" (UID: \"34c9d979-fc5d-4c7d-951c-ff13f8814802\") " pod="openstack/aodh-db-sync-7bqdn" Feb 18 00:59:10 crc kubenswrapper[4791]: I0218 00:59:10.787097 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34c9d979-fc5d-4c7d-951c-ff13f8814802-combined-ca-bundle\") pod \"aodh-db-sync-7bqdn\" (UID: \"34c9d979-fc5d-4c7d-951c-ff13f8814802\") " pod="openstack/aodh-db-sync-7bqdn" Feb 18 00:59:10 crc kubenswrapper[4791]: I0218 00:59:10.787494 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34c9d979-fc5d-4c7d-951c-ff13f8814802-config-data\") pod \"aodh-db-sync-7bqdn\" (UID: \"34c9d979-fc5d-4c7d-951c-ff13f8814802\") " pod="openstack/aodh-db-sync-7bqdn" Feb 18 00:59:10 crc kubenswrapper[4791]: I0218 00:59:10.795660 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nrgs5\" (UniqueName: \"kubernetes.io/projected/34c9d979-fc5d-4c7d-951c-ff13f8814802-kube-api-access-nrgs5\") pod \"aodh-db-sync-7bqdn\" (UID: \"34c9d979-fc5d-4c7d-951c-ff13f8814802\") " pod="openstack/aodh-db-sync-7bqdn" Feb 18 00:59:10 crc kubenswrapper[4791]: I0218 00:59:10.830746 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-7bqdn" Feb 18 00:59:11 crc kubenswrapper[4791]: I0218 00:59:11.325535 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-7bqdn"] Feb 18 00:59:11 crc kubenswrapper[4791]: I0218 00:59:11.424725 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-7bqdn" event={"ID":"34c9d979-fc5d-4c7d-951c-ff13f8814802","Type":"ContainerStarted","Data":"c8b2b2d50b33a103627b81d6a34447ebea61953f7edda092f54f08a611434178"} Feb 18 00:59:12 crc kubenswrapper[4791]: I0218 00:59:12.527405 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Feb 18 00:59:14 crc kubenswrapper[4791]: I0218 00:59:14.763993 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Feb 18 00:59:14 crc kubenswrapper[4791]: I0218 00:59:14.803129 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Feb 18 00:59:15 crc kubenswrapper[4791]: I0218 00:59:15.482816 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-7bqdn" event={"ID":"34c9d979-fc5d-4c7d-951c-ff13f8814802","Type":"ContainerStarted","Data":"64f3d2f00c0405ca692a5efb494493436d782e5a5ff691a2062cf8b44627c753"} Feb 18 00:59:15 crc kubenswrapper[4791]: I0218 00:59:15.531861 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Feb 18 00:59:15 crc kubenswrapper[4791]: I0218 00:59:15.532005 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-sync-7bqdn" podStartSLOduration=1.873318975 podStartE2EDuration="5.531876991s" podCreationTimestamp="2026-02-18 00:59:10 +0000 UTC" firstStartedPulling="2026-02-18 00:59:11.330738303 +0000 UTC m=+1492.898751473" lastFinishedPulling="2026-02-18 00:59:14.989296319 +0000 UTC m=+1496.557309489" observedRunningTime="2026-02-18 00:59:15.498748126 +0000 UTC m=+1497.066761326" watchObservedRunningTime="2026-02-18 00:59:15.531876991 +0000 UTC m=+1497.099890161" Feb 18 00:59:18 crc kubenswrapper[4791]: I0218 00:59:18.516702 4791 generic.go:334] "Generic (PLEG): container finished" podID="34c9d979-fc5d-4c7d-951c-ff13f8814802" containerID="64f3d2f00c0405ca692a5efb494493436d782e5a5ff691a2062cf8b44627c753" exitCode=0 Feb 18 00:59:18 crc kubenswrapper[4791]: I0218 00:59:18.516776 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-7bqdn" event={"ID":"34c9d979-fc5d-4c7d-951c-ff13f8814802","Type":"ContainerDied","Data":"64f3d2f00c0405ca692a5efb494493436d782e5a5ff691a2062cf8b44627c753"} Feb 18 00:59:19 crc kubenswrapper[4791]: I0218 00:59:19.087540 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Feb 18 00:59:19 crc kubenswrapper[4791]: I0218 00:59:19.088503 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Feb 18 00:59:19 crc kubenswrapper[4791]: I0218 00:59:19.961513 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-7bqdn" Feb 18 00:59:20 crc kubenswrapper[4791]: I0218 00:59:20.116305 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34c9d979-fc5d-4c7d-951c-ff13f8814802-combined-ca-bundle\") pod \"34c9d979-fc5d-4c7d-951c-ff13f8814802\" (UID: \"34c9d979-fc5d-4c7d-951c-ff13f8814802\") " Feb 18 00:59:20 crc kubenswrapper[4791]: I0218 00:59:20.116613 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34c9d979-fc5d-4c7d-951c-ff13f8814802-config-data\") pod \"34c9d979-fc5d-4c7d-951c-ff13f8814802\" (UID: \"34c9d979-fc5d-4c7d-951c-ff13f8814802\") " Feb 18 00:59:20 crc kubenswrapper[4791]: I0218 00:59:20.116684 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/34c9d979-fc5d-4c7d-951c-ff13f8814802-scripts\") pod \"34c9d979-fc5d-4c7d-951c-ff13f8814802\" (UID: \"34c9d979-fc5d-4c7d-951c-ff13f8814802\") " Feb 18 00:59:20 crc kubenswrapper[4791]: I0218 00:59:20.116855 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nrgs5\" (UniqueName: \"kubernetes.io/projected/34c9d979-fc5d-4c7d-951c-ff13f8814802-kube-api-access-nrgs5\") pod \"34c9d979-fc5d-4c7d-951c-ff13f8814802\" (UID: \"34c9d979-fc5d-4c7d-951c-ff13f8814802\") " Feb 18 00:59:20 crc kubenswrapper[4791]: I0218 00:59:20.137209 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34c9d979-fc5d-4c7d-951c-ff13f8814802-scripts" (OuterVolumeSpecName: "scripts") pod "34c9d979-fc5d-4c7d-951c-ff13f8814802" (UID: "34c9d979-fc5d-4c7d-951c-ff13f8814802"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:59:20 crc kubenswrapper[4791]: I0218 00:59:20.137638 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34c9d979-fc5d-4c7d-951c-ff13f8814802-kube-api-access-nrgs5" (OuterVolumeSpecName: "kube-api-access-nrgs5") pod "34c9d979-fc5d-4c7d-951c-ff13f8814802" (UID: "34c9d979-fc5d-4c7d-951c-ff13f8814802"). InnerVolumeSpecName "kube-api-access-nrgs5". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:59:20 crc kubenswrapper[4791]: I0218 00:59:20.149936 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34c9d979-fc5d-4c7d-951c-ff13f8814802-config-data" (OuterVolumeSpecName: "config-data") pod "34c9d979-fc5d-4c7d-951c-ff13f8814802" (UID: "34c9d979-fc5d-4c7d-951c-ff13f8814802"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:59:20 crc kubenswrapper[4791]: I0218 00:59:20.167400 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="07a4538d-b54c-45b5-8455-5d7d0a0144b8" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.255:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Feb 18 00:59:20 crc kubenswrapper[4791]: I0218 00:59:20.167503 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="07a4538d-b54c-45b5-8455-5d7d0a0144b8" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.255:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Feb 18 00:59:20 crc kubenswrapper[4791]: I0218 00:59:20.169465 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34c9d979-fc5d-4c7d-951c-ff13f8814802-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "34c9d979-fc5d-4c7d-951c-ff13f8814802" (UID: "34c9d979-fc5d-4c7d-951c-ff13f8814802"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:59:20 crc kubenswrapper[4791]: I0218 00:59:20.219375 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34c9d979-fc5d-4c7d-951c-ff13f8814802-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:20 crc kubenswrapper[4791]: I0218 00:59:20.219596 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34c9d979-fc5d-4c7d-951c-ff13f8814802-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:20 crc kubenswrapper[4791]: I0218 00:59:20.219676 4791 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/34c9d979-fc5d-4c7d-951c-ff13f8814802-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:20 crc kubenswrapper[4791]: I0218 00:59:20.219752 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nrgs5\" (UniqueName: \"kubernetes.io/projected/34c9d979-fc5d-4c7d-951c-ff13f8814802-kube-api-access-nrgs5\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:20 crc kubenswrapper[4791]: I0218 00:59:20.538142 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-7bqdn" event={"ID":"34c9d979-fc5d-4c7d-951c-ff13f8814802","Type":"ContainerDied","Data":"c8b2b2d50b33a103627b81d6a34447ebea61953f7edda092f54f08a611434178"} Feb 18 00:59:20 crc kubenswrapper[4791]: I0218 00:59:20.538194 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c8b2b2d50b33a103627b81d6a34447ebea61953f7edda092f54f08a611434178" Feb 18 00:59:20 crc kubenswrapper[4791]: I0218 00:59:20.538250 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-7bqdn" Feb 18 00:59:25 crc kubenswrapper[4791]: I0218 00:59:25.156767 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Feb 18 00:59:25 crc kubenswrapper[4791]: E0218 00:59:25.157829 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34c9d979-fc5d-4c7d-951c-ff13f8814802" containerName="aodh-db-sync" Feb 18 00:59:25 crc kubenswrapper[4791]: I0218 00:59:25.157845 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="34c9d979-fc5d-4c7d-951c-ff13f8814802" containerName="aodh-db-sync" Feb 18 00:59:25 crc kubenswrapper[4791]: I0218 00:59:25.158045 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="34c9d979-fc5d-4c7d-951c-ff13f8814802" containerName="aodh-db-sync" Feb 18 00:59:25 crc kubenswrapper[4791]: I0218 00:59:25.160535 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Feb 18 00:59:25 crc kubenswrapper[4791]: I0218 00:59:25.162403 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Feb 18 00:59:25 crc kubenswrapper[4791]: I0218 00:59:25.164528 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Feb 18 00:59:25 crc kubenswrapper[4791]: I0218 00:59:25.164718 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-n4w9p" Feb 18 00:59:25 crc kubenswrapper[4791]: I0218 00:59:25.169497 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Feb 18 00:59:25 crc kubenswrapper[4791]: I0218 00:59:25.242321 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/153d9910-d8a5-43d7-8683-dab4c032949b-scripts\") pod \"aodh-0\" (UID: \"153d9910-d8a5-43d7-8683-dab4c032949b\") " pod="openstack/aodh-0" Feb 18 00:59:25 crc kubenswrapper[4791]: I0218 00:59:25.242421 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/153d9910-d8a5-43d7-8683-dab4c032949b-config-data\") pod \"aodh-0\" (UID: \"153d9910-d8a5-43d7-8683-dab4c032949b\") " pod="openstack/aodh-0" Feb 18 00:59:25 crc kubenswrapper[4791]: I0218 00:59:25.242591 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-96b8p\" (UniqueName: \"kubernetes.io/projected/153d9910-d8a5-43d7-8683-dab4c032949b-kube-api-access-96b8p\") pod \"aodh-0\" (UID: \"153d9910-d8a5-43d7-8683-dab4c032949b\") " pod="openstack/aodh-0" Feb 18 00:59:25 crc kubenswrapper[4791]: I0218 00:59:25.242614 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/153d9910-d8a5-43d7-8683-dab4c032949b-combined-ca-bundle\") pod \"aodh-0\" (UID: \"153d9910-d8a5-43d7-8683-dab4c032949b\") " pod="openstack/aodh-0" Feb 18 00:59:25 crc kubenswrapper[4791]: I0218 00:59:25.363570 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/153d9910-d8a5-43d7-8683-dab4c032949b-scripts\") pod \"aodh-0\" (UID: \"153d9910-d8a5-43d7-8683-dab4c032949b\") " pod="openstack/aodh-0" Feb 18 00:59:25 crc kubenswrapper[4791]: I0218 00:59:25.364220 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/153d9910-d8a5-43d7-8683-dab4c032949b-config-data\") pod \"aodh-0\" (UID: \"153d9910-d8a5-43d7-8683-dab4c032949b\") " pod="openstack/aodh-0" Feb 18 00:59:25 crc kubenswrapper[4791]: I0218 00:59:25.364588 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-96b8p\" (UniqueName: \"kubernetes.io/projected/153d9910-d8a5-43d7-8683-dab4c032949b-kube-api-access-96b8p\") pod \"aodh-0\" (UID: \"153d9910-d8a5-43d7-8683-dab4c032949b\") " pod="openstack/aodh-0" Feb 18 00:59:25 crc kubenswrapper[4791]: I0218 00:59:25.364694 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/153d9910-d8a5-43d7-8683-dab4c032949b-combined-ca-bundle\") pod \"aodh-0\" (UID: \"153d9910-d8a5-43d7-8683-dab4c032949b\") " pod="openstack/aodh-0" Feb 18 00:59:25 crc kubenswrapper[4791]: I0218 00:59:25.371822 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/153d9910-d8a5-43d7-8683-dab4c032949b-scripts\") pod \"aodh-0\" (UID: \"153d9910-d8a5-43d7-8683-dab4c032949b\") " pod="openstack/aodh-0" Feb 18 00:59:25 crc kubenswrapper[4791]: I0218 00:59:25.371847 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/153d9910-d8a5-43d7-8683-dab4c032949b-combined-ca-bundle\") pod \"aodh-0\" (UID: \"153d9910-d8a5-43d7-8683-dab4c032949b\") " pod="openstack/aodh-0" Feb 18 00:59:25 crc kubenswrapper[4791]: I0218 00:59:25.378603 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/153d9910-d8a5-43d7-8683-dab4c032949b-config-data\") pod \"aodh-0\" (UID: \"153d9910-d8a5-43d7-8683-dab4c032949b\") " pod="openstack/aodh-0" Feb 18 00:59:25 crc kubenswrapper[4791]: I0218 00:59:25.386765 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-96b8p\" (UniqueName: \"kubernetes.io/projected/153d9910-d8a5-43d7-8683-dab4c032949b-kube-api-access-96b8p\") pod \"aodh-0\" (UID: \"153d9910-d8a5-43d7-8683-dab4c032949b\") " pod="openstack/aodh-0" Feb 18 00:59:25 crc kubenswrapper[4791]: I0218 00:59:25.486245 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Feb 18 00:59:26 crc kubenswrapper[4791]: I0218 00:59:26.015657 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Feb 18 00:59:26 crc kubenswrapper[4791]: E0218 00:59:26.249018 4791 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod13f78453_b30b_4f2c_bec0_22c7e601d5c7.slice/crio-3cb45baf8135e12ca924626a7a191c3c2463700ec1ede1092efa6f63dc0208fd.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb560aa26_6a1f_4695_aeaa_b1a80b0765ec.slice/crio-9d40c13311df164d949c3c135d4cb2d915e261aabafc59a3f3469bd6185abadb.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod13f78453_b30b_4f2c_bec0_22c7e601d5c7.slice/crio-conmon-3cb45baf8135e12ca924626a7a191c3c2463700ec1ede1092efa6f63dc0208fd.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb560aa26_6a1f_4695_aeaa_b1a80b0765ec.slice/crio-conmon-9d40c13311df164d949c3c135d4cb2d915e261aabafc59a3f3469bd6185abadb.scope\": RecentStats: unable to find data in memory cache]" Feb 18 00:59:26 crc kubenswrapper[4791]: I0218 00:59:26.632561 4791 generic.go:334] "Generic (PLEG): container finished" podID="13f78453-b30b-4f2c-bec0-22c7e601d5c7" containerID="3cb45baf8135e12ca924626a7a191c3c2463700ec1ede1092efa6f63dc0208fd" exitCode=137 Feb 18 00:59:26 crc kubenswrapper[4791]: I0218 00:59:26.632635 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"13f78453-b30b-4f2c-bec0-22c7e601d5c7","Type":"ContainerDied","Data":"3cb45baf8135e12ca924626a7a191c3c2463700ec1ede1092efa6f63dc0208fd"} Feb 18 00:59:26 crc kubenswrapper[4791]: I0218 00:59:26.632897 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"13f78453-b30b-4f2c-bec0-22c7e601d5c7","Type":"ContainerDied","Data":"619d2093bf45cfedf20f3e0bdc3bc0fdd5701259fd5e98f2cf695cfc052ff0bb"} Feb 18 00:59:26 crc kubenswrapper[4791]: I0218 00:59:26.632916 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="619d2093bf45cfedf20f3e0bdc3bc0fdd5701259fd5e98f2cf695cfc052ff0bb" Feb 18 00:59:26 crc kubenswrapper[4791]: I0218 00:59:26.634027 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"153d9910-d8a5-43d7-8683-dab4c032949b","Type":"ContainerStarted","Data":"2930ca4a2e54c38da2baa9aaee9af34359d9b3f419bed2367e258ec728fdd9dd"} Feb 18 00:59:26 crc kubenswrapper[4791]: I0218 00:59:26.635968 4791 generic.go:334] "Generic (PLEG): container finished" podID="b560aa26-6a1f-4695-aeaa-b1a80b0765ec" containerID="9d40c13311df164d949c3c135d4cb2d915e261aabafc59a3f3469bd6185abadb" exitCode=137 Feb 18 00:59:26 crc kubenswrapper[4791]: I0218 00:59:26.635993 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"b560aa26-6a1f-4695-aeaa-b1a80b0765ec","Type":"ContainerDied","Data":"9d40c13311df164d949c3c135d4cb2d915e261aabafc59a3f3469bd6185abadb"} Feb 18 00:59:26 crc kubenswrapper[4791]: I0218 00:59:26.636024 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" 
event={"ID":"b560aa26-6a1f-4695-aeaa-b1a80b0765ec","Type":"ContainerDied","Data":"e8ca1782d68f08bda5a42ed45ff261a6db532c448cd1a3cca23fc9454e73a2de"} Feb 18 00:59:26 crc kubenswrapper[4791]: I0218 00:59:26.636034 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e8ca1782d68f08bda5a42ed45ff261a6db532c448cd1a3cca23fc9454e73a2de" Feb 18 00:59:26 crc kubenswrapper[4791]: I0218 00:59:26.682072 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Feb 18 00:59:26 crc kubenswrapper[4791]: I0218 00:59:26.688146 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Feb 18 00:59:26 crc kubenswrapper[4791]: I0218 00:59:26.801545 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/13f78453-b30b-4f2c-bec0-22c7e601d5c7-logs\") pod \"13f78453-b30b-4f2c-bec0-22c7e601d5c7\" (UID: \"13f78453-b30b-4f2c-bec0-22c7e601d5c7\") " Feb 18 00:59:26 crc kubenswrapper[4791]: I0218 00:59:26.801598 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9cvb8\" (UniqueName: \"kubernetes.io/projected/13f78453-b30b-4f2c-bec0-22c7e601d5c7-kube-api-access-9cvb8\") pod \"13f78453-b30b-4f2c-bec0-22c7e601d5c7\" (UID: \"13f78453-b30b-4f2c-bec0-22c7e601d5c7\") " Feb 18 00:59:26 crc kubenswrapper[4791]: I0218 00:59:26.801670 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13f78453-b30b-4f2c-bec0-22c7e601d5c7-config-data\") pod \"13f78453-b30b-4f2c-bec0-22c7e601d5c7\" (UID: \"13f78453-b30b-4f2c-bec0-22c7e601d5c7\") " Feb 18 00:59:26 crc kubenswrapper[4791]: I0218 00:59:26.801690 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b560aa26-6a1f-4695-aeaa-b1a80b0765ec-config-data\") pod \"b560aa26-6a1f-4695-aeaa-b1a80b0765ec\" (UID: \"b560aa26-6a1f-4695-aeaa-b1a80b0765ec\") " Feb 18 00:59:26 crc kubenswrapper[4791]: I0218 00:59:26.801835 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13f78453-b30b-4f2c-bec0-22c7e601d5c7-combined-ca-bundle\") pod \"13f78453-b30b-4f2c-bec0-22c7e601d5c7\" (UID: \"13f78453-b30b-4f2c-bec0-22c7e601d5c7\") " Feb 18 00:59:26 crc kubenswrapper[4791]: I0218 00:59:26.801963 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xv8lj\" (UniqueName: \"kubernetes.io/projected/b560aa26-6a1f-4695-aeaa-b1a80b0765ec-kube-api-access-xv8lj\") pod \"b560aa26-6a1f-4695-aeaa-b1a80b0765ec\" (UID: \"b560aa26-6a1f-4695-aeaa-b1a80b0765ec\") " Feb 18 00:59:26 crc kubenswrapper[4791]: I0218 00:59:26.802026 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b560aa26-6a1f-4695-aeaa-b1a80b0765ec-combined-ca-bundle\") pod \"b560aa26-6a1f-4695-aeaa-b1a80b0765ec\" (UID: \"b560aa26-6a1f-4695-aeaa-b1a80b0765ec\") " Feb 18 00:59:26 crc kubenswrapper[4791]: I0218 00:59:26.803608 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/13f78453-b30b-4f2c-bec0-22c7e601d5c7-logs" (OuterVolumeSpecName: "logs") pod "13f78453-b30b-4f2c-bec0-22c7e601d5c7" (UID: "13f78453-b30b-4f2c-bec0-22c7e601d5c7"). 
InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:59:26 crc kubenswrapper[4791]: I0218 00:59:26.808337 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13f78453-b30b-4f2c-bec0-22c7e601d5c7-kube-api-access-9cvb8" (OuterVolumeSpecName: "kube-api-access-9cvb8") pod "13f78453-b30b-4f2c-bec0-22c7e601d5c7" (UID: "13f78453-b30b-4f2c-bec0-22c7e601d5c7"). InnerVolumeSpecName "kube-api-access-9cvb8". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:59:26 crc kubenswrapper[4791]: I0218 00:59:26.815441 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b560aa26-6a1f-4695-aeaa-b1a80b0765ec-kube-api-access-xv8lj" (OuterVolumeSpecName: "kube-api-access-xv8lj") pod "b560aa26-6a1f-4695-aeaa-b1a80b0765ec" (UID: "b560aa26-6a1f-4695-aeaa-b1a80b0765ec"). InnerVolumeSpecName "kube-api-access-xv8lj". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:59:26 crc kubenswrapper[4791]: I0218 00:59:26.874300 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13f78453-b30b-4f2c-bec0-22c7e601d5c7-config-data" (OuterVolumeSpecName: "config-data") pod "13f78453-b30b-4f2c-bec0-22c7e601d5c7" (UID: "13f78453-b30b-4f2c-bec0-22c7e601d5c7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:59:26 crc kubenswrapper[4791]: I0218 00:59:26.887260 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b560aa26-6a1f-4695-aeaa-b1a80b0765ec-config-data" (OuterVolumeSpecName: "config-data") pod "b560aa26-6a1f-4695-aeaa-b1a80b0765ec" (UID: "b560aa26-6a1f-4695-aeaa-b1a80b0765ec"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:59:26 crc kubenswrapper[4791]: I0218 00:59:26.889439 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b560aa26-6a1f-4695-aeaa-b1a80b0765ec-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b560aa26-6a1f-4695-aeaa-b1a80b0765ec" (UID: "b560aa26-6a1f-4695-aeaa-b1a80b0765ec"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:59:26 crc kubenswrapper[4791]: I0218 00:59:26.894248 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13f78453-b30b-4f2c-bec0-22c7e601d5c7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "13f78453-b30b-4f2c-bec0-22c7e601d5c7" (UID: "13f78453-b30b-4f2c-bec0-22c7e601d5c7"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:59:26 crc kubenswrapper[4791]: I0218 00:59:26.907766 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xv8lj\" (UniqueName: \"kubernetes.io/projected/b560aa26-6a1f-4695-aeaa-b1a80b0765ec-kube-api-access-xv8lj\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:26 crc kubenswrapper[4791]: I0218 00:59:26.907802 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b560aa26-6a1f-4695-aeaa-b1a80b0765ec-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:26 crc kubenswrapper[4791]: I0218 00:59:26.907812 4791 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/13f78453-b30b-4f2c-bec0-22c7e601d5c7-logs\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:26 crc kubenswrapper[4791]: I0218 00:59:26.907822 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9cvb8\" (UniqueName: \"kubernetes.io/projected/13f78453-b30b-4f2c-bec0-22c7e601d5c7-kube-api-access-9cvb8\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:26 crc kubenswrapper[4791]: I0218 00:59:26.907831 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13f78453-b30b-4f2c-bec0-22c7e601d5c7-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:26 crc kubenswrapper[4791]: I0218 00:59:26.907839 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b560aa26-6a1f-4695-aeaa-b1a80b0765ec-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:26 crc kubenswrapper[4791]: I0218 00:59:26.907847 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13f78453-b30b-4f2c-bec0-22c7e601d5c7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:27 crc kubenswrapper[4791]: I0218 00:59:27.528896 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Feb 18 00:59:27 crc kubenswrapper[4791]: I0218 00:59:27.750109 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Feb 18 00:59:27 crc kubenswrapper[4791]: I0218 00:59:27.750344 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"153d9910-d8a5-43d7-8683-dab4c032949b","Type":"ContainerStarted","Data":"9ca55234f7fded51ec0c6b658c7b77583e8d3098a7b8e005247ca2a013d27fa4"} Feb 18 00:59:27 crc kubenswrapper[4791]: I0218 00:59:27.752315 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Feb 18 00:59:27 crc kubenswrapper[4791]: I0218 00:59:27.814239 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Feb 18 00:59:27 crc kubenswrapper[4791]: I0218 00:59:27.842213 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Feb 18 00:59:27 crc kubenswrapper[4791]: I0218 00:59:27.871911 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Feb 18 00:59:27 crc kubenswrapper[4791]: I0218 00:59:27.926726 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Feb 18 00:59:27 crc kubenswrapper[4791]: E0218 00:59:27.927240 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13f78453-b30b-4f2c-bec0-22c7e601d5c7" containerName="nova-metadata-log" Feb 18 00:59:27 crc kubenswrapper[4791]: I0218 00:59:27.927258 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="13f78453-b30b-4f2c-bec0-22c7e601d5c7" containerName="nova-metadata-log" Feb 18 00:59:27 crc kubenswrapper[4791]: E0218 00:59:27.927296 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b560aa26-6a1f-4695-aeaa-b1a80b0765ec" containerName="nova-cell1-novncproxy-novncproxy" Feb 18 00:59:27 crc kubenswrapper[4791]: I0218 00:59:27.927303 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="b560aa26-6a1f-4695-aeaa-b1a80b0765ec" containerName="nova-cell1-novncproxy-novncproxy" Feb 18 00:59:27 crc kubenswrapper[4791]: E0218 00:59:27.927321 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13f78453-b30b-4f2c-bec0-22c7e601d5c7" containerName="nova-metadata-metadata" Feb 18 00:59:27 crc kubenswrapper[4791]: I0218 00:59:27.927327 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="13f78453-b30b-4f2c-bec0-22c7e601d5c7" containerName="nova-metadata-metadata" Feb 18 00:59:27 crc kubenswrapper[4791]: I0218 00:59:27.927523 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="13f78453-b30b-4f2c-bec0-22c7e601d5c7" containerName="nova-metadata-metadata" Feb 18 00:59:27 crc kubenswrapper[4791]: I0218 00:59:27.927535 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="b560aa26-6a1f-4695-aeaa-b1a80b0765ec" containerName="nova-cell1-novncproxy-novncproxy" Feb 18 00:59:27 crc kubenswrapper[4791]: I0218 00:59:27.927560 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="13f78453-b30b-4f2c-bec0-22c7e601d5c7" containerName="nova-metadata-log" Feb 18 00:59:27 crc kubenswrapper[4791]: I0218 00:59:27.928690 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Feb 18 00:59:27 crc kubenswrapper[4791]: I0218 00:59:27.931109 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Feb 18 00:59:27 crc kubenswrapper[4791]: I0218 00:59:27.931354 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Feb 18 00:59:27 crc kubenswrapper[4791]: I0218 00:59:27.942818 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Feb 18 00:59:27 crc kubenswrapper[4791]: I0218 00:59:27.951710 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/006d44aa-45d0-4ebb-9b58-18246020566e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"006d44aa-45d0-4ebb-9b58-18246020566e\") " pod="openstack/nova-metadata-0" Feb 18 00:59:27 crc kubenswrapper[4791]: I0218 00:59:27.951885 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jbp58\" (UniqueName: \"kubernetes.io/projected/006d44aa-45d0-4ebb-9b58-18246020566e-kube-api-access-jbp58\") pod \"nova-metadata-0\" (UID: \"006d44aa-45d0-4ebb-9b58-18246020566e\") " pod="openstack/nova-metadata-0" Feb 18 00:59:27 crc kubenswrapper[4791]: I0218 00:59:27.951932 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/006d44aa-45d0-4ebb-9b58-18246020566e-config-data\") pod \"nova-metadata-0\" (UID: \"006d44aa-45d0-4ebb-9b58-18246020566e\") " pod="openstack/nova-metadata-0" Feb 18 00:59:27 crc kubenswrapper[4791]: I0218 00:59:27.951950 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/006d44aa-45d0-4ebb-9b58-18246020566e-logs\") pod \"nova-metadata-0\" (UID: \"006d44aa-45d0-4ebb-9b58-18246020566e\") " pod="openstack/nova-metadata-0" Feb 18 00:59:27 crc kubenswrapper[4791]: I0218 00:59:27.951980 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/006d44aa-45d0-4ebb-9b58-18246020566e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"006d44aa-45d0-4ebb-9b58-18246020566e\") " pod="openstack/nova-metadata-0" Feb 18 00:59:27 crc kubenswrapper[4791]: I0218 00:59:27.958387 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Feb 18 00:59:27 crc kubenswrapper[4791]: I0218 00:59:27.968856 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Feb 18 00:59:27 crc kubenswrapper[4791]: I0218 00:59:27.970563 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Feb 18 00:59:27 crc kubenswrapper[4791]: I0218 00:59:27.975767 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Feb 18 00:59:27 crc kubenswrapper[4791]: I0218 00:59:27.975862 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Feb 18 00:59:27 crc kubenswrapper[4791]: I0218 00:59:27.975897 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Feb 18 00:59:27 crc kubenswrapper[4791]: I0218 00:59:27.979778 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.055070 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/006d44aa-45d0-4ebb-9b58-18246020566e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"006d44aa-45d0-4ebb-9b58-18246020566e\") " pod="openstack/nova-metadata-0" Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.056345 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jbp58\" (UniqueName: \"kubernetes.io/projected/006d44aa-45d0-4ebb-9b58-18246020566e-kube-api-access-jbp58\") pod \"nova-metadata-0\" (UID: \"006d44aa-45d0-4ebb-9b58-18246020566e\") " pod="openstack/nova-metadata-0" Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.056416 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/006d44aa-45d0-4ebb-9b58-18246020566e-config-data\") pod \"nova-metadata-0\" (UID: \"006d44aa-45d0-4ebb-9b58-18246020566e\") " pod="openstack/nova-metadata-0" Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.056444 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/006d44aa-45d0-4ebb-9b58-18246020566e-logs\") pod \"nova-metadata-0\" (UID: \"006d44aa-45d0-4ebb-9b58-18246020566e\") " pod="openstack/nova-metadata-0" Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.056492 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/006d44aa-45d0-4ebb-9b58-18246020566e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"006d44aa-45d0-4ebb-9b58-18246020566e\") " pod="openstack/nova-metadata-0" Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.057141 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/006d44aa-45d0-4ebb-9b58-18246020566e-logs\") pod \"nova-metadata-0\" (UID: \"006d44aa-45d0-4ebb-9b58-18246020566e\") " pod="openstack/nova-metadata-0" Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.069105 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/006d44aa-45d0-4ebb-9b58-18246020566e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"006d44aa-45d0-4ebb-9b58-18246020566e\") " pod="openstack/nova-metadata-0" Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.069180 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/006d44aa-45d0-4ebb-9b58-18246020566e-nova-metadata-tls-certs\") pod 
\"nova-metadata-0\" (UID: \"006d44aa-45d0-4ebb-9b58-18246020566e\") " pod="openstack/nova-metadata-0" Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.070225 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/006d44aa-45d0-4ebb-9b58-18246020566e-config-data\") pod \"nova-metadata-0\" (UID: \"006d44aa-45d0-4ebb-9b58-18246020566e\") " pod="openstack/nova-metadata-0" Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.072774 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jbp58\" (UniqueName: \"kubernetes.io/projected/006d44aa-45d0-4ebb-9b58-18246020566e-kube-api-access-jbp58\") pod \"nova-metadata-0\" (UID: \"006d44aa-45d0-4ebb-9b58-18246020566e\") " pod="openstack/nova-metadata-0" Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.159031 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/87cb2d4e-852a-45d2-b7e1-b6fd1811b6e5-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"87cb2d4e-852a-45d2-b7e1-b6fd1811b6e5\") " pod="openstack/nova-cell1-novncproxy-0" Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.159145 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87cb2d4e-852a-45d2-b7e1-b6fd1811b6e5-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"87cb2d4e-852a-45d2-b7e1-b6fd1811b6e5\") " pod="openstack/nova-cell1-novncproxy-0" Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.159201 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nplz2\" (UniqueName: \"kubernetes.io/projected/87cb2d4e-852a-45d2-b7e1-b6fd1811b6e5-kube-api-access-nplz2\") pod \"nova-cell1-novncproxy-0\" (UID: \"87cb2d4e-852a-45d2-b7e1-b6fd1811b6e5\") " pod="openstack/nova-cell1-novncproxy-0" Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.159227 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/87cb2d4e-852a-45d2-b7e1-b6fd1811b6e5-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"87cb2d4e-852a-45d2-b7e1-b6fd1811b6e5\") " pod="openstack/nova-cell1-novncproxy-0" Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.159281 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87cb2d4e-852a-45d2-b7e1-b6fd1811b6e5-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"87cb2d4e-852a-45d2-b7e1-b6fd1811b6e5\") " pod="openstack/nova-cell1-novncproxy-0" Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.245482 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.245991 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1b6491f1-bdcf-4dee-8431-f40a42f31974" containerName="ceilometer-central-agent" containerID="cri-o://4c7612fd6f6a0fa38bd600643f3c0070cfd7c993fd1e225c0e406af5cf936b53" gracePeriod=30 Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.246297 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" 
podUID="1b6491f1-bdcf-4dee-8431-f40a42f31974" containerName="ceilometer-notification-agent" containerID="cri-o://42a78a94641d73380c44ac477a87bcde1b640e51e613cbecb083c0e82635a7c5" gracePeriod=30 Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.246328 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1b6491f1-bdcf-4dee-8431-f40a42f31974" containerName="proxy-httpd" containerID="cri-o://2f82488cbc129cf4cbe31cb94ccdf8a22f518c1ba381f0400f3e7babdd9f87fd" gracePeriod=30 Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.246408 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1b6491f1-bdcf-4dee-8431-f40a42f31974" containerName="sg-core" containerID="cri-o://df765d183742581ea1577d91862b93b2592ae80aae1783f3f80e35a4fb81228c" gracePeriod=30 Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.261352 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/87cb2d4e-852a-45d2-b7e1-b6fd1811b6e5-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"87cb2d4e-852a-45d2-b7e1-b6fd1811b6e5\") " pod="openstack/nova-cell1-novncproxy-0" Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.261525 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87cb2d4e-852a-45d2-b7e1-b6fd1811b6e5-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"87cb2d4e-852a-45d2-b7e1-b6fd1811b6e5\") " pod="openstack/nova-cell1-novncproxy-0" Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.262325 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.262561 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nplz2\" (UniqueName: \"kubernetes.io/projected/87cb2d4e-852a-45d2-b7e1-b6fd1811b6e5-kube-api-access-nplz2\") pod \"nova-cell1-novncproxy-0\" (UID: \"87cb2d4e-852a-45d2-b7e1-b6fd1811b6e5\") " pod="openstack/nova-cell1-novncproxy-0" Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.262691 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/87cb2d4e-852a-45d2-b7e1-b6fd1811b6e5-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"87cb2d4e-852a-45d2-b7e1-b6fd1811b6e5\") " pod="openstack/nova-cell1-novncproxy-0" Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.262881 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87cb2d4e-852a-45d2-b7e1-b6fd1811b6e5-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"87cb2d4e-852a-45d2-b7e1-b6fd1811b6e5\") " pod="openstack/nova-cell1-novncproxy-0" Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.265136 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/87cb2d4e-852a-45d2-b7e1-b6fd1811b6e5-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"87cb2d4e-852a-45d2-b7e1-b6fd1811b6e5\") " pod="openstack/nova-cell1-novncproxy-0" Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.266748 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/87cb2d4e-852a-45d2-b7e1-b6fd1811b6e5-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"87cb2d4e-852a-45d2-b7e1-b6fd1811b6e5\") " pod="openstack/nova-cell1-novncproxy-0" Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.278012 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87cb2d4e-852a-45d2-b7e1-b6fd1811b6e5-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"87cb2d4e-852a-45d2-b7e1-b6fd1811b6e5\") " pod="openstack/nova-cell1-novncproxy-0" Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.280806 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/87cb2d4e-852a-45d2-b7e1-b6fd1811b6e5-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"87cb2d4e-852a-45d2-b7e1-b6fd1811b6e5\") " pod="openstack/nova-cell1-novncproxy-0" Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.280822 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nplz2\" (UniqueName: \"kubernetes.io/projected/87cb2d4e-852a-45d2-b7e1-b6fd1811b6e5-kube-api-access-nplz2\") pod \"nova-cell1-novncproxy-0\" (UID: \"87cb2d4e-852a-45d2-b7e1-b6fd1811b6e5\") " pod="openstack/nova-cell1-novncproxy-0" Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.297798 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.698901 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.763466 4791 generic.go:334] "Generic (PLEG): container finished" podID="1b6491f1-bdcf-4dee-8431-f40a42f31974" containerID="2f82488cbc129cf4cbe31cb94ccdf8a22f518c1ba381f0400f3e7babdd9f87fd" exitCode=0 Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.763497 4791 generic.go:334] "Generic (PLEG): container finished" podID="1b6491f1-bdcf-4dee-8431-f40a42f31974" containerID="df765d183742581ea1577d91862b93b2592ae80aae1783f3f80e35a4fb81228c" exitCode=2 Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.763508 4791 generic.go:334] "Generic (PLEG): container finished" podID="1b6491f1-bdcf-4dee-8431-f40a42f31974" containerID="4c7612fd6f6a0fa38bd600643f3c0070cfd7c993fd1e225c0e406af5cf936b53" exitCode=0 Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.763529 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1b6491f1-bdcf-4dee-8431-f40a42f31974","Type":"ContainerDied","Data":"2f82488cbc129cf4cbe31cb94ccdf8a22f518c1ba381f0400f3e7babdd9f87fd"} Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.763555 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1b6491f1-bdcf-4dee-8431-f40a42f31974","Type":"ContainerDied","Data":"df765d183742581ea1577d91862b93b2592ae80aae1783f3f80e35a4fb81228c"} Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.763564 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1b6491f1-bdcf-4dee-8431-f40a42f31974","Type":"ContainerDied","Data":"4c7612fd6f6a0fa38bd600643f3c0070cfd7c993fd1e225c0e406af5cf936b53"} Feb 18 00:59:28 crc kubenswrapper[4791]: I0218 00:59:28.851199 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Feb 18 00:59:29 crc kubenswrapper[4791]: I0218 00:59:29.014517 4791 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Feb 18 00:59:29 crc kubenswrapper[4791]: I0218 00:59:29.072595 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="13f78453-b30b-4f2c-bec0-22c7e601d5c7" path="/var/lib/kubelet/pods/13f78453-b30b-4f2c-bec0-22c7e601d5c7/volumes" Feb 18 00:59:29 crc kubenswrapper[4791]: I0218 00:59:29.073222 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b560aa26-6a1f-4695-aeaa-b1a80b0765ec" path="/var/lib/kubelet/pods/b560aa26-6a1f-4695-aeaa-b1a80b0765ec/volumes" Feb 18 00:59:29 crc kubenswrapper[4791]: I0218 00:59:29.094861 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Feb 18 00:59:29 crc kubenswrapper[4791]: I0218 00:59:29.096829 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Feb 18 00:59:29 crc kubenswrapper[4791]: I0218 00:59:29.119484 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Feb 18 00:59:29 crc kubenswrapper[4791]: I0218 00:59:29.122922 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Feb 18 00:59:29 crc kubenswrapper[4791]: I0218 00:59:29.781819 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"87cb2d4e-852a-45d2-b7e1-b6fd1811b6e5","Type":"ContainerStarted","Data":"772df9344a0c941ec8342656268cfd4eedf3ec7ed77093bd7b48f431a7225048"} Feb 18 00:59:29 crc kubenswrapper[4791]: I0218 00:59:29.782123 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"87cb2d4e-852a-45d2-b7e1-b6fd1811b6e5","Type":"ContainerStarted","Data":"6665d7ddae93d92424a3fc5d463804f6e69a551c48e1b7c63e5218b9e031f8b5"} Feb 18 00:59:29 crc kubenswrapper[4791]: I0218 00:59:29.785213 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"006d44aa-45d0-4ebb-9b58-18246020566e","Type":"ContainerStarted","Data":"e9f3faf96dd2728a0a14a172186d820c4e6068536780f59fb3a0eddf84c5dd50"} Feb 18 00:59:29 crc kubenswrapper[4791]: I0218 00:59:29.785241 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"006d44aa-45d0-4ebb-9b58-18246020566e","Type":"ContainerStarted","Data":"63b908872891102b7b77ae8f03f360b89e67e27a593ae111a436916c55dd2d55"} Feb 18 00:59:29 crc kubenswrapper[4791]: I0218 00:59:29.785250 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"006d44aa-45d0-4ebb-9b58-18246020566e","Type":"ContainerStarted","Data":"01a02b51c9d9747c4dc101f7d35f843caa1f70aa640b1846e040d4412bb5b64a"} Feb 18 00:59:29 crc kubenswrapper[4791]: I0218 00:59:29.790095 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"153d9910-d8a5-43d7-8683-dab4c032949b","Type":"ContainerStarted","Data":"9cd2c0a8a75f4033983e0d35e14d0964ee728560b945a757583825bd9f8fb7db"} Feb 18 00:59:29 crc kubenswrapper[4791]: I0218 00:59:29.790171 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Feb 18 00:59:29 crc kubenswrapper[4791]: I0218 00:59:29.801288 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.801273131 podStartE2EDuration="2.801273131s" podCreationTimestamp="2026-02-18 00:59:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 
UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:59:29.796953497 +0000 UTC m=+1511.364966667" watchObservedRunningTime="2026-02-18 00:59:29.801273131 +0000 UTC m=+1511.369286291" Feb 18 00:59:29 crc kubenswrapper[4791]: I0218 00:59:29.825899 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Feb 18 00:59:29 crc kubenswrapper[4791]: I0218 00:59:29.826709 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.826689868 podStartE2EDuration="2.826689868s" podCreationTimestamp="2026-02-18 00:59:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:59:29.814542662 +0000 UTC m=+1511.382555832" watchObservedRunningTime="2026-02-18 00:59:29.826689868 +0000 UTC m=+1511.394703038" Feb 18 00:59:30 crc kubenswrapper[4791]: I0218 00:59:30.037394 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-b7s2x"] Feb 18 00:59:30 crc kubenswrapper[4791]: I0218 00:59:30.039462 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79b5d74c8c-b7s2x" Feb 18 00:59:30 crc kubenswrapper[4791]: I0218 00:59:30.054066 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-b7s2x"] Feb 18 00:59:30 crc kubenswrapper[4791]: I0218 00:59:30.154870 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d564d20-a048-4a0b-93c5-6b2b1dd278f8-config\") pod \"dnsmasq-dns-79b5d74c8c-b7s2x\" (UID: \"3d564d20-a048-4a0b-93c5-6b2b1dd278f8\") " pod="openstack/dnsmasq-dns-79b5d74c8c-b7s2x" Feb 18 00:59:30 crc kubenswrapper[4791]: I0218 00:59:30.154949 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3d564d20-a048-4a0b-93c5-6b2b1dd278f8-dns-swift-storage-0\") pod \"dnsmasq-dns-79b5d74c8c-b7s2x\" (UID: \"3d564d20-a048-4a0b-93c5-6b2b1dd278f8\") " pod="openstack/dnsmasq-dns-79b5d74c8c-b7s2x" Feb 18 00:59:30 crc kubenswrapper[4791]: I0218 00:59:30.155005 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fjt2w\" (UniqueName: \"kubernetes.io/projected/3d564d20-a048-4a0b-93c5-6b2b1dd278f8-kube-api-access-fjt2w\") pod \"dnsmasq-dns-79b5d74c8c-b7s2x\" (UID: \"3d564d20-a048-4a0b-93c5-6b2b1dd278f8\") " pod="openstack/dnsmasq-dns-79b5d74c8c-b7s2x" Feb 18 00:59:30 crc kubenswrapper[4791]: I0218 00:59:30.155130 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3d564d20-a048-4a0b-93c5-6b2b1dd278f8-ovsdbserver-nb\") pod \"dnsmasq-dns-79b5d74c8c-b7s2x\" (UID: \"3d564d20-a048-4a0b-93c5-6b2b1dd278f8\") " pod="openstack/dnsmasq-dns-79b5d74c8c-b7s2x" Feb 18 00:59:30 crc kubenswrapper[4791]: I0218 00:59:30.155181 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3d564d20-a048-4a0b-93c5-6b2b1dd278f8-ovsdbserver-sb\") pod \"dnsmasq-dns-79b5d74c8c-b7s2x\" (UID: \"3d564d20-a048-4a0b-93c5-6b2b1dd278f8\") " pod="openstack/dnsmasq-dns-79b5d74c8c-b7s2x" Feb 18 00:59:30 crc kubenswrapper[4791]: I0218 
00:59:30.155273 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d564d20-a048-4a0b-93c5-6b2b1dd278f8-dns-svc\") pod \"dnsmasq-dns-79b5d74c8c-b7s2x\" (UID: \"3d564d20-a048-4a0b-93c5-6b2b1dd278f8\") " pod="openstack/dnsmasq-dns-79b5d74c8c-b7s2x" Feb 18 00:59:30 crc kubenswrapper[4791]: I0218 00:59:30.257620 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3d564d20-a048-4a0b-93c5-6b2b1dd278f8-ovsdbserver-nb\") pod \"dnsmasq-dns-79b5d74c8c-b7s2x\" (UID: \"3d564d20-a048-4a0b-93c5-6b2b1dd278f8\") " pod="openstack/dnsmasq-dns-79b5d74c8c-b7s2x" Feb 18 00:59:30 crc kubenswrapper[4791]: I0218 00:59:30.257976 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3d564d20-a048-4a0b-93c5-6b2b1dd278f8-ovsdbserver-sb\") pod \"dnsmasq-dns-79b5d74c8c-b7s2x\" (UID: \"3d564d20-a048-4a0b-93c5-6b2b1dd278f8\") " pod="openstack/dnsmasq-dns-79b5d74c8c-b7s2x" Feb 18 00:59:30 crc kubenswrapper[4791]: I0218 00:59:30.258041 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d564d20-a048-4a0b-93c5-6b2b1dd278f8-dns-svc\") pod \"dnsmasq-dns-79b5d74c8c-b7s2x\" (UID: \"3d564d20-a048-4a0b-93c5-6b2b1dd278f8\") " pod="openstack/dnsmasq-dns-79b5d74c8c-b7s2x" Feb 18 00:59:30 crc kubenswrapper[4791]: I0218 00:59:30.258094 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d564d20-a048-4a0b-93c5-6b2b1dd278f8-config\") pod \"dnsmasq-dns-79b5d74c8c-b7s2x\" (UID: \"3d564d20-a048-4a0b-93c5-6b2b1dd278f8\") " pod="openstack/dnsmasq-dns-79b5d74c8c-b7s2x" Feb 18 00:59:30 crc kubenswrapper[4791]: I0218 00:59:30.258141 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3d564d20-a048-4a0b-93c5-6b2b1dd278f8-dns-swift-storage-0\") pod \"dnsmasq-dns-79b5d74c8c-b7s2x\" (UID: \"3d564d20-a048-4a0b-93c5-6b2b1dd278f8\") " pod="openstack/dnsmasq-dns-79b5d74c8c-b7s2x" Feb 18 00:59:30 crc kubenswrapper[4791]: I0218 00:59:30.258184 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fjt2w\" (UniqueName: \"kubernetes.io/projected/3d564d20-a048-4a0b-93c5-6b2b1dd278f8-kube-api-access-fjt2w\") pod \"dnsmasq-dns-79b5d74c8c-b7s2x\" (UID: \"3d564d20-a048-4a0b-93c5-6b2b1dd278f8\") " pod="openstack/dnsmasq-dns-79b5d74c8c-b7s2x" Feb 18 00:59:30 crc kubenswrapper[4791]: I0218 00:59:30.259740 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3d564d20-a048-4a0b-93c5-6b2b1dd278f8-ovsdbserver-nb\") pod \"dnsmasq-dns-79b5d74c8c-b7s2x\" (UID: \"3d564d20-a048-4a0b-93c5-6b2b1dd278f8\") " pod="openstack/dnsmasq-dns-79b5d74c8c-b7s2x" Feb 18 00:59:30 crc kubenswrapper[4791]: I0218 00:59:30.260339 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3d564d20-a048-4a0b-93c5-6b2b1dd278f8-ovsdbserver-sb\") pod \"dnsmasq-dns-79b5d74c8c-b7s2x\" (UID: \"3d564d20-a048-4a0b-93c5-6b2b1dd278f8\") " pod="openstack/dnsmasq-dns-79b5d74c8c-b7s2x" Feb 18 00:59:30 crc kubenswrapper[4791]: I0218 00:59:30.260899 4791 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d564d20-a048-4a0b-93c5-6b2b1dd278f8-dns-svc\") pod \"dnsmasq-dns-79b5d74c8c-b7s2x\" (UID: \"3d564d20-a048-4a0b-93c5-6b2b1dd278f8\") " pod="openstack/dnsmasq-dns-79b5d74c8c-b7s2x" Feb 18 00:59:30 crc kubenswrapper[4791]: I0218 00:59:30.261863 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d564d20-a048-4a0b-93c5-6b2b1dd278f8-config\") pod \"dnsmasq-dns-79b5d74c8c-b7s2x\" (UID: \"3d564d20-a048-4a0b-93c5-6b2b1dd278f8\") " pod="openstack/dnsmasq-dns-79b5d74c8c-b7s2x" Feb 18 00:59:30 crc kubenswrapper[4791]: I0218 00:59:30.262402 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3d564d20-a048-4a0b-93c5-6b2b1dd278f8-dns-swift-storage-0\") pod \"dnsmasq-dns-79b5d74c8c-b7s2x\" (UID: \"3d564d20-a048-4a0b-93c5-6b2b1dd278f8\") " pod="openstack/dnsmasq-dns-79b5d74c8c-b7s2x" Feb 18 00:59:30 crc kubenswrapper[4791]: I0218 00:59:30.346219 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fjt2w\" (UniqueName: \"kubernetes.io/projected/3d564d20-a048-4a0b-93c5-6b2b1dd278f8-kube-api-access-fjt2w\") pod \"dnsmasq-dns-79b5d74c8c-b7s2x\" (UID: \"3d564d20-a048-4a0b-93c5-6b2b1dd278f8\") " pod="openstack/dnsmasq-dns-79b5d74c8c-b7s2x" Feb 18 00:59:30 crc kubenswrapper[4791]: I0218 00:59:30.420821 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79b5d74c8c-b7s2x" Feb 18 00:59:31 crc kubenswrapper[4791]: I0218 00:59:31.079692 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-b7s2x"] Feb 18 00:59:31 crc kubenswrapper[4791]: I0218 00:59:31.824918 4791 generic.go:334] "Generic (PLEG): container finished" podID="3d564d20-a048-4a0b-93c5-6b2b1dd278f8" containerID="2bf93004ddd7cc4f22bcb47d5e1cdf54a7eebad8eb0aeb1166edc2589c048aed" exitCode=0 Feb 18 00:59:31 crc kubenswrapper[4791]: I0218 00:59:31.825115 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-b7s2x" event={"ID":"3d564d20-a048-4a0b-93c5-6b2b1dd278f8","Type":"ContainerDied","Data":"2bf93004ddd7cc4f22bcb47d5e1cdf54a7eebad8eb0aeb1166edc2589c048aed"} Feb 18 00:59:31 crc kubenswrapper[4791]: I0218 00:59:31.825299 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-b7s2x" event={"ID":"3d564d20-a048-4a0b-93c5-6b2b1dd278f8","Type":"ContainerStarted","Data":"1d7c4590afd53498f12e2db4667d2b7a8b30948e342666447b619a46e5e79c4a"} Feb 18 00:59:32 crc kubenswrapper[4791]: I0218 00:59:32.914735 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-b7s2x" event={"ID":"3d564d20-a048-4a0b-93c5-6b2b1dd278f8","Type":"ContainerStarted","Data":"ebf925a19e35e92570426048f941cfb36885dd9f889bcc8f6b01dbb33b9e9ff7"} Feb 18 00:59:32 crc kubenswrapper[4791]: I0218 00:59:32.915580 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-79b5d74c8c-b7s2x" Feb 18 00:59:32 crc kubenswrapper[4791]: I0218 00:59:32.947473 4791 generic.go:334] "Generic (PLEG): container finished" podID="1b6491f1-bdcf-4dee-8431-f40a42f31974" containerID="42a78a94641d73380c44ac477a87bcde1b640e51e613cbecb083c0e82635a7c5" exitCode=0 Feb 18 00:59:32 crc kubenswrapper[4791]: I0218 00:59:32.947521 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"1b6491f1-bdcf-4dee-8431-f40a42f31974","Type":"ContainerDied","Data":"42a78a94641d73380c44ac477a87bcde1b640e51e613cbecb083c0e82635a7c5"} Feb 18 00:59:32 crc kubenswrapper[4791]: I0218 00:59:32.971576 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Feb 18 00:59:32 crc kubenswrapper[4791]: I0218 00:59:32.971783 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="07a4538d-b54c-45b5-8455-5d7d0a0144b8" containerName="nova-api-log" containerID="cri-o://6d5f413397c33456907645fdf5056da7a7acef430cbe138126a867f77095e019" gracePeriod=30 Feb 18 00:59:32 crc kubenswrapper[4791]: I0218 00:59:32.972319 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="07a4538d-b54c-45b5-8455-5d7d0a0144b8" containerName="nova-api-api" containerID="cri-o://3a78929b084d217f6863356a5f2feb6f084607971a13a0784994a7607811fc66" gracePeriod=30 Feb 18 00:59:33 crc kubenswrapper[4791]: I0218 00:59:33.001884 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-79b5d74c8c-b7s2x" podStartSLOduration=4.001865582 podStartE2EDuration="4.001865582s" podCreationTimestamp="2026-02-18 00:59:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:59:32.954076454 +0000 UTC m=+1514.522089624" watchObservedRunningTime="2026-02-18 00:59:33.001865582 +0000 UTC m=+1514.569878752" Feb 18 00:59:33 crc kubenswrapper[4791]: I0218 00:59:33.190621 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 18 00:59:33 crc kubenswrapper[4791]: I0218 00:59:33.262790 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Feb 18 00:59:33 crc kubenswrapper[4791]: I0218 00:59:33.264290 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Feb 18 00:59:33 crc kubenswrapper[4791]: I0218 00:59:33.299353 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Feb 18 00:59:33 crc kubenswrapper[4791]: I0218 00:59:33.383984 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1b6491f1-bdcf-4dee-8431-f40a42f31974-run-httpd\") pod \"1b6491f1-bdcf-4dee-8431-f40a42f31974\" (UID: \"1b6491f1-bdcf-4dee-8431-f40a42f31974\") " Feb 18 00:59:33 crc kubenswrapper[4791]: I0218 00:59:33.384058 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1b6491f1-bdcf-4dee-8431-f40a42f31974-log-httpd\") pod \"1b6491f1-bdcf-4dee-8431-f40a42f31974\" (UID: \"1b6491f1-bdcf-4dee-8431-f40a42f31974\") " Feb 18 00:59:33 crc kubenswrapper[4791]: I0218 00:59:33.384079 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6wmcr\" (UniqueName: \"kubernetes.io/projected/1b6491f1-bdcf-4dee-8431-f40a42f31974-kube-api-access-6wmcr\") pod \"1b6491f1-bdcf-4dee-8431-f40a42f31974\" (UID: \"1b6491f1-bdcf-4dee-8431-f40a42f31974\") " Feb 18 00:59:33 crc kubenswrapper[4791]: I0218 00:59:33.384104 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1b6491f1-bdcf-4dee-8431-f40a42f31974-scripts\") pod 
\"1b6491f1-bdcf-4dee-8431-f40a42f31974\" (UID: \"1b6491f1-bdcf-4dee-8431-f40a42f31974\") " Feb 18 00:59:33 crc kubenswrapper[4791]: I0218 00:59:33.384285 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b6491f1-bdcf-4dee-8431-f40a42f31974-config-data\") pod \"1b6491f1-bdcf-4dee-8431-f40a42f31974\" (UID: \"1b6491f1-bdcf-4dee-8431-f40a42f31974\") " Feb 18 00:59:33 crc kubenswrapper[4791]: I0218 00:59:33.384378 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1b6491f1-bdcf-4dee-8431-f40a42f31974-sg-core-conf-yaml\") pod \"1b6491f1-bdcf-4dee-8431-f40a42f31974\" (UID: \"1b6491f1-bdcf-4dee-8431-f40a42f31974\") " Feb 18 00:59:33 crc kubenswrapper[4791]: I0218 00:59:33.384410 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b6491f1-bdcf-4dee-8431-f40a42f31974-combined-ca-bundle\") pod \"1b6491f1-bdcf-4dee-8431-f40a42f31974\" (UID: \"1b6491f1-bdcf-4dee-8431-f40a42f31974\") " Feb 18 00:59:33 crc kubenswrapper[4791]: I0218 00:59:33.384857 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1b6491f1-bdcf-4dee-8431-f40a42f31974-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "1b6491f1-bdcf-4dee-8431-f40a42f31974" (UID: "1b6491f1-bdcf-4dee-8431-f40a42f31974"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:59:33 crc kubenswrapper[4791]: I0218 00:59:33.384952 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1b6491f1-bdcf-4dee-8431-f40a42f31974-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "1b6491f1-bdcf-4dee-8431-f40a42f31974" (UID: "1b6491f1-bdcf-4dee-8431-f40a42f31974"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:59:33 crc kubenswrapper[4791]: I0218 00:59:33.386097 4791 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1b6491f1-bdcf-4dee-8431-f40a42f31974-run-httpd\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:33 crc kubenswrapper[4791]: I0218 00:59:33.386297 4791 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1b6491f1-bdcf-4dee-8431-f40a42f31974-log-httpd\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:33 crc kubenswrapper[4791]: I0218 00:59:33.389908 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b6491f1-bdcf-4dee-8431-f40a42f31974-kube-api-access-6wmcr" (OuterVolumeSpecName: "kube-api-access-6wmcr") pod "1b6491f1-bdcf-4dee-8431-f40a42f31974" (UID: "1b6491f1-bdcf-4dee-8431-f40a42f31974"). InnerVolumeSpecName "kube-api-access-6wmcr". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:59:33 crc kubenswrapper[4791]: I0218 00:59:33.391447 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b6491f1-bdcf-4dee-8431-f40a42f31974-scripts" (OuterVolumeSpecName: "scripts") pod "1b6491f1-bdcf-4dee-8431-f40a42f31974" (UID: "1b6491f1-bdcf-4dee-8431-f40a42f31974"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:59:33 crc kubenswrapper[4791]: I0218 00:59:33.418067 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b6491f1-bdcf-4dee-8431-f40a42f31974-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "1b6491f1-bdcf-4dee-8431-f40a42f31974" (UID: "1b6491f1-bdcf-4dee-8431-f40a42f31974"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:59:33 crc kubenswrapper[4791]: I0218 00:59:33.483309 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b6491f1-bdcf-4dee-8431-f40a42f31974-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1b6491f1-bdcf-4dee-8431-f40a42f31974" (UID: "1b6491f1-bdcf-4dee-8431-f40a42f31974"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:59:33 crc kubenswrapper[4791]: I0218 00:59:33.488518 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b6491f1-bdcf-4dee-8431-f40a42f31974-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:33 crc kubenswrapper[4791]: I0218 00:59:33.488648 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6wmcr\" (UniqueName: \"kubernetes.io/projected/1b6491f1-bdcf-4dee-8431-f40a42f31974-kube-api-access-6wmcr\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:33 crc kubenswrapper[4791]: I0218 00:59:33.488706 4791 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1b6491f1-bdcf-4dee-8431-f40a42f31974-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:33 crc kubenswrapper[4791]: I0218 00:59:33.489310 4791 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1b6491f1-bdcf-4dee-8431-f40a42f31974-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:33 crc kubenswrapper[4791]: I0218 00:59:33.512303 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b6491f1-bdcf-4dee-8431-f40a42f31974-config-data" (OuterVolumeSpecName: "config-data") pod "1b6491f1-bdcf-4dee-8431-f40a42f31974" (UID: "1b6491f1-bdcf-4dee-8431-f40a42f31974"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:59:33 crc kubenswrapper[4791]: I0218 00:59:33.591932 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b6491f1-bdcf-4dee-8431-f40a42f31974-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:33 crc kubenswrapper[4791]: I0218 00:59:33.964349 4791 generic.go:334] "Generic (PLEG): container finished" podID="07a4538d-b54c-45b5-8455-5d7d0a0144b8" containerID="6d5f413397c33456907645fdf5056da7a7acef430cbe138126a867f77095e019" exitCode=143 Feb 18 00:59:33 crc kubenswrapper[4791]: I0218 00:59:33.964440 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"07a4538d-b54c-45b5-8455-5d7d0a0144b8","Type":"ContainerDied","Data":"6d5f413397c33456907645fdf5056da7a7acef430cbe138126a867f77095e019"} Feb 18 00:59:33 crc kubenswrapper[4791]: I0218 00:59:33.968064 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"153d9910-d8a5-43d7-8683-dab4c032949b","Type":"ContainerStarted","Data":"5c178396279c61d27e41529cc52280e09a3234fba50b14c4a18f93432598052e"} Feb 18 00:59:33 crc kubenswrapper[4791]: I0218 00:59:33.975692 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 18 00:59:33 crc kubenswrapper[4791]: I0218 00:59:33.981816 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1b6491f1-bdcf-4dee-8431-f40a42f31974","Type":"ContainerDied","Data":"a51bb8a25a1dba1e6234536f996ccefa6b404885c68c86e9da2fc40b026a6320"} Feb 18 00:59:33 crc kubenswrapper[4791]: I0218 00:59:33.981896 4791 scope.go:117] "RemoveContainer" containerID="2f82488cbc129cf4cbe31cb94ccdf8a22f518c1ba381f0400f3e7babdd9f87fd" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.015777 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.027818 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.043305 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:59:34 crc kubenswrapper[4791]: E0218 00:59:34.043878 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b6491f1-bdcf-4dee-8431-f40a42f31974" containerName="ceilometer-central-agent" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.043901 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b6491f1-bdcf-4dee-8431-f40a42f31974" containerName="ceilometer-central-agent" Feb 18 00:59:34 crc kubenswrapper[4791]: E0218 00:59:34.043925 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b6491f1-bdcf-4dee-8431-f40a42f31974" containerName="sg-core" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.043932 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b6491f1-bdcf-4dee-8431-f40a42f31974" containerName="sg-core" Feb 18 00:59:34 crc kubenswrapper[4791]: E0218 00:59:34.043944 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b6491f1-bdcf-4dee-8431-f40a42f31974" containerName="ceilometer-notification-agent" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.043950 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b6491f1-bdcf-4dee-8431-f40a42f31974" containerName="ceilometer-notification-agent" Feb 18 00:59:34 crc kubenswrapper[4791]: E0218 00:59:34.043973 4791 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b6491f1-bdcf-4dee-8431-f40a42f31974" containerName="proxy-httpd" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.043980 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b6491f1-bdcf-4dee-8431-f40a42f31974" containerName="proxy-httpd" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.044210 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b6491f1-bdcf-4dee-8431-f40a42f31974" containerName="ceilometer-central-agent" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.044228 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b6491f1-bdcf-4dee-8431-f40a42f31974" containerName="ceilometer-notification-agent" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.044246 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b6491f1-bdcf-4dee-8431-f40a42f31974" containerName="proxy-httpd" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.044267 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b6491f1-bdcf-4dee-8431-f40a42f31974" containerName="sg-core" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.046420 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.054048 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.103580 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.104471 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.105667 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7486cd5a-bf0d-4162-95ff-505011bcbda3-run-httpd\") pod \"ceilometer-0\" (UID: \"7486cd5a-bf0d-4162-95ff-505011bcbda3\") " pod="openstack/ceilometer-0" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.105730 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7486cd5a-bf0d-4162-95ff-505011bcbda3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7486cd5a-bf0d-4162-95ff-505011bcbda3\") " pod="openstack/ceilometer-0" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.105787 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7486cd5a-bf0d-4162-95ff-505011bcbda3-config-data\") pod \"ceilometer-0\" (UID: \"7486cd5a-bf0d-4162-95ff-505011bcbda3\") " pod="openstack/ceilometer-0" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.105834 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7486cd5a-bf0d-4162-95ff-505011bcbda3-log-httpd\") pod \"ceilometer-0\" (UID: \"7486cd5a-bf0d-4162-95ff-505011bcbda3\") " pod="openstack/ceilometer-0" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.105866 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7486cd5a-bf0d-4162-95ff-505011bcbda3-sg-core-conf-yaml\") pod 
\"ceilometer-0\" (UID: \"7486cd5a-bf0d-4162-95ff-505011bcbda3\") " pod="openstack/ceilometer-0" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.105949 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lmvl7\" (UniqueName: \"kubernetes.io/projected/7486cd5a-bf0d-4162-95ff-505011bcbda3-kube-api-access-lmvl7\") pod \"ceilometer-0\" (UID: \"7486cd5a-bf0d-4162-95ff-505011bcbda3\") " pod="openstack/ceilometer-0" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.105977 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7486cd5a-bf0d-4162-95ff-505011bcbda3-scripts\") pod \"ceilometer-0\" (UID: \"7486cd5a-bf0d-4162-95ff-505011bcbda3\") " pod="openstack/ceilometer-0" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.208250 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7486cd5a-bf0d-4162-95ff-505011bcbda3-run-httpd\") pod \"ceilometer-0\" (UID: \"7486cd5a-bf0d-4162-95ff-505011bcbda3\") " pod="openstack/ceilometer-0" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.208311 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7486cd5a-bf0d-4162-95ff-505011bcbda3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7486cd5a-bf0d-4162-95ff-505011bcbda3\") " pod="openstack/ceilometer-0" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.208368 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7486cd5a-bf0d-4162-95ff-505011bcbda3-config-data\") pod \"ceilometer-0\" (UID: \"7486cd5a-bf0d-4162-95ff-505011bcbda3\") " pod="openstack/ceilometer-0" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.208402 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7486cd5a-bf0d-4162-95ff-505011bcbda3-log-httpd\") pod \"ceilometer-0\" (UID: \"7486cd5a-bf0d-4162-95ff-505011bcbda3\") " pod="openstack/ceilometer-0" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.208435 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7486cd5a-bf0d-4162-95ff-505011bcbda3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7486cd5a-bf0d-4162-95ff-505011bcbda3\") " pod="openstack/ceilometer-0" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.208479 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lmvl7\" (UniqueName: \"kubernetes.io/projected/7486cd5a-bf0d-4162-95ff-505011bcbda3-kube-api-access-lmvl7\") pod \"ceilometer-0\" (UID: \"7486cd5a-bf0d-4162-95ff-505011bcbda3\") " pod="openstack/ceilometer-0" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.208495 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7486cd5a-bf0d-4162-95ff-505011bcbda3-scripts\") pod \"ceilometer-0\" (UID: \"7486cd5a-bf0d-4162-95ff-505011bcbda3\") " pod="openstack/ceilometer-0" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.209779 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/7486cd5a-bf0d-4162-95ff-505011bcbda3-log-httpd\") pod \"ceilometer-0\" (UID: \"7486cd5a-bf0d-4162-95ff-505011bcbda3\") " pod="openstack/ceilometer-0" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.210326 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7486cd5a-bf0d-4162-95ff-505011bcbda3-run-httpd\") pod \"ceilometer-0\" (UID: \"7486cd5a-bf0d-4162-95ff-505011bcbda3\") " pod="openstack/ceilometer-0" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.218841 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7486cd5a-bf0d-4162-95ff-505011bcbda3-scripts\") pod \"ceilometer-0\" (UID: \"7486cd5a-bf0d-4162-95ff-505011bcbda3\") " pod="openstack/ceilometer-0" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.224263 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7486cd5a-bf0d-4162-95ff-505011bcbda3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7486cd5a-bf0d-4162-95ff-505011bcbda3\") " pod="openstack/ceilometer-0" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.224469 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7486cd5a-bf0d-4162-95ff-505011bcbda3-config-data\") pod \"ceilometer-0\" (UID: \"7486cd5a-bf0d-4162-95ff-505011bcbda3\") " pod="openstack/ceilometer-0" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.227688 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lmvl7\" (UniqueName: \"kubernetes.io/projected/7486cd5a-bf0d-4162-95ff-505011bcbda3-kube-api-access-lmvl7\") pod \"ceilometer-0\" (UID: \"7486cd5a-bf0d-4162-95ff-505011bcbda3\") " pod="openstack/ceilometer-0" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.227919 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7486cd5a-bf0d-4162-95ff-505011bcbda3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7486cd5a-bf0d-4162-95ff-505011bcbda3\") " pod="openstack/ceilometer-0" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.328314 4791 scope.go:117] "RemoveContainer" containerID="df765d183742581ea1577d91862b93b2592ae80aae1783f3f80e35a4fb81228c" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.347046 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.348197 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.384307 4791 scope.go:117] "RemoveContainer" containerID="42a78a94641d73380c44ac477a87bcde1b640e51e613cbecb083c0e82635a7c5" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.480949 4791 scope.go:117] "RemoveContainer" containerID="4c7612fd6f6a0fa38bd600643f3c0070cfd7c993fd1e225c0e406af5cf936b53" Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.986481 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"153d9910-d8a5-43d7-8683-dab4c032949b","Type":"ContainerStarted","Data":"b3531dbc69ff57f8c6aea658e21db3f0d9b11411c11e5dea409f166daf22056b"} Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.987119 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="153d9910-d8a5-43d7-8683-dab4c032949b" containerName="aodh-api" containerID="cri-o://9ca55234f7fded51ec0c6b658c7b77583e8d3098a7b8e005247ca2a013d27fa4" gracePeriod=30 Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.987664 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="153d9910-d8a5-43d7-8683-dab4c032949b" containerName="aodh-notifier" containerID="cri-o://5c178396279c61d27e41529cc52280e09a3234fba50b14c4a18f93432598052e" gracePeriod=30 Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.987679 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="153d9910-d8a5-43d7-8683-dab4c032949b" containerName="aodh-evaluator" containerID="cri-o://9cd2c0a8a75f4033983e0d35e14d0964ee728560b945a757583825bd9f8fb7db" gracePeriod=30 Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.987708 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="153d9910-d8a5-43d7-8683-dab4c032949b" containerName="aodh-listener" containerID="cri-o://b3531dbc69ff57f8c6aea658e21db3f0d9b11411c11e5dea409f166daf22056b" gracePeriod=30 Feb 18 00:59:34 crc kubenswrapper[4791]: I0218 00:59:34.988336 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 18 00:59:35 crc kubenswrapper[4791]: W0218 00:59:35.019776 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7486cd5a_bf0d_4162_95ff_505011bcbda3.slice/crio-a782621fdafa86320a197e75bcb58ac417d1f4b2d2197eed5eddbd639454425a WatchSource:0}: Error finding container a782621fdafa86320a197e75bcb58ac417d1f4b2d2197eed5eddbd639454425a: Status 404 returned error can't find the container with id a782621fdafa86320a197e75bcb58ac417d1f4b2d2197eed5eddbd639454425a Feb 18 00:59:35 crc kubenswrapper[4791]: I0218 00:59:35.088062 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1b6491f1-bdcf-4dee-8431-f40a42f31974" path="/var/lib/kubelet/pods/1b6491f1-bdcf-4dee-8431-f40a42f31974/volumes" Feb 18 00:59:36 crc kubenswrapper[4791]: I0218 00:59:36.002949 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7486cd5a-bf0d-4162-95ff-505011bcbda3","Type":"ContainerStarted","Data":"6c174fdf234fd334ffe9a6f553d9b44d2b8e85e1dd65161a78424566c3ebfea2"} Feb 18 00:59:36 crc kubenswrapper[4791]: I0218 00:59:36.003299 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"7486cd5a-bf0d-4162-95ff-505011bcbda3","Type":"ContainerStarted","Data":"a782621fdafa86320a197e75bcb58ac417d1f4b2d2197eed5eddbd639454425a"} Feb 18 00:59:36 crc kubenswrapper[4791]: I0218 00:59:36.004985 4791 generic.go:334] "Generic (PLEG): container finished" podID="153d9910-d8a5-43d7-8683-dab4c032949b" containerID="9cd2c0a8a75f4033983e0d35e14d0964ee728560b945a757583825bd9f8fb7db" exitCode=0 Feb 18 00:59:36 crc kubenswrapper[4791]: I0218 00:59:36.005016 4791 generic.go:334] "Generic (PLEG): container finished" podID="153d9910-d8a5-43d7-8683-dab4c032949b" containerID="9ca55234f7fded51ec0c6b658c7b77583e8d3098a7b8e005247ca2a013d27fa4" exitCode=0 Feb 18 00:59:36 crc kubenswrapper[4791]: I0218 00:59:36.005036 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"153d9910-d8a5-43d7-8683-dab4c032949b","Type":"ContainerDied","Data":"9cd2c0a8a75f4033983e0d35e14d0964ee728560b945a757583825bd9f8fb7db"} Feb 18 00:59:36 crc kubenswrapper[4791]: I0218 00:59:36.005063 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"153d9910-d8a5-43d7-8683-dab4c032949b","Type":"ContainerDied","Data":"9ca55234f7fded51ec0c6b658c7b77583e8d3098a7b8e005247ca2a013d27fa4"} Feb 18 00:59:36 crc kubenswrapper[4791]: I0218 00:59:36.981898 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.005923 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=3.6439538259999997 podStartE2EDuration="12.005898764s" podCreationTimestamp="2026-02-18 00:59:25 +0000 UTC" firstStartedPulling="2026-02-18 00:59:26.118242868 +0000 UTC m=+1507.686256038" lastFinishedPulling="2026-02-18 00:59:34.480187806 +0000 UTC m=+1516.048200976" observedRunningTime="2026-02-18 00:59:35.025034748 +0000 UTC m=+1516.593047918" watchObservedRunningTime="2026-02-18 00:59:37.005898764 +0000 UTC m=+1518.573911934" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.042406 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7486cd5a-bf0d-4162-95ff-505011bcbda3","Type":"ContainerStarted","Data":"0efa0e6358b07f086c2ce4083ee352f0b20c59f1d6237cab0949400ad5cd0917"} Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.045395 4791 generic.go:334] "Generic (PLEG): container finished" podID="07a4538d-b54c-45b5-8455-5d7d0a0144b8" containerID="3a78929b084d217f6863356a5f2feb6f084607971a13a0784994a7607811fc66" exitCode=0 Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.045432 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"07a4538d-b54c-45b5-8455-5d7d0a0144b8","Type":"ContainerDied","Data":"3a78929b084d217f6863356a5f2feb6f084607971a13a0784994a7607811fc66"} Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.045457 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"07a4538d-b54c-45b5-8455-5d7d0a0144b8","Type":"ContainerDied","Data":"5b3455353264e4e9807a77c72e3f16539188710dce69f9732c4c1da26632f3a5"} Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.045475 4791 scope.go:117] "RemoveContainer" containerID="3a78929b084d217f6863356a5f2feb6f084607971a13a0784994a7607811fc66" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.045608 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.083573 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07a4538d-b54c-45b5-8455-5d7d0a0144b8-combined-ca-bundle\") pod \"07a4538d-b54c-45b5-8455-5d7d0a0144b8\" (UID: \"07a4538d-b54c-45b5-8455-5d7d0a0144b8\") " Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.083793 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07a4538d-b54c-45b5-8455-5d7d0a0144b8-logs\") pod \"07a4538d-b54c-45b5-8455-5d7d0a0144b8\" (UID: \"07a4538d-b54c-45b5-8455-5d7d0a0144b8\") " Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.084277 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07a4538d-b54c-45b5-8455-5d7d0a0144b8-logs" (OuterVolumeSpecName: "logs") pod "07a4538d-b54c-45b5-8455-5d7d0a0144b8" (UID: "07a4538d-b54c-45b5-8455-5d7d0a0144b8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.084354 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g6rmc\" (UniqueName: \"kubernetes.io/projected/07a4538d-b54c-45b5-8455-5d7d0a0144b8-kube-api-access-g6rmc\") pod \"07a4538d-b54c-45b5-8455-5d7d0a0144b8\" (UID: \"07a4538d-b54c-45b5-8455-5d7d0a0144b8\") " Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.084825 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07a4538d-b54c-45b5-8455-5d7d0a0144b8-config-data\") pod \"07a4538d-b54c-45b5-8455-5d7d0a0144b8\" (UID: \"07a4538d-b54c-45b5-8455-5d7d0a0144b8\") " Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.086036 4791 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07a4538d-b54c-45b5-8455-5d7d0a0144b8-logs\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.104448 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07a4538d-b54c-45b5-8455-5d7d0a0144b8-kube-api-access-g6rmc" (OuterVolumeSpecName: "kube-api-access-g6rmc") pod "07a4538d-b54c-45b5-8455-5d7d0a0144b8" (UID: "07a4538d-b54c-45b5-8455-5d7d0a0144b8"). InnerVolumeSpecName "kube-api-access-g6rmc". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.105587 4791 scope.go:117] "RemoveContainer" containerID="6d5f413397c33456907645fdf5056da7a7acef430cbe138126a867f77095e019" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.121869 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07a4538d-b54c-45b5-8455-5d7d0a0144b8-config-data" (OuterVolumeSpecName: "config-data") pod "07a4538d-b54c-45b5-8455-5d7d0a0144b8" (UID: "07a4538d-b54c-45b5-8455-5d7d0a0144b8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.144812 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07a4538d-b54c-45b5-8455-5d7d0a0144b8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "07a4538d-b54c-45b5-8455-5d7d0a0144b8" (UID: "07a4538d-b54c-45b5-8455-5d7d0a0144b8"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.190312 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07a4538d-b54c-45b5-8455-5d7d0a0144b8-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.190356 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07a4538d-b54c-45b5-8455-5d7d0a0144b8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.190371 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g6rmc\" (UniqueName: \"kubernetes.io/projected/07a4538d-b54c-45b5-8455-5d7d0a0144b8-kube-api-access-g6rmc\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.303389 4791 scope.go:117] "RemoveContainer" containerID="3a78929b084d217f6863356a5f2feb6f084607971a13a0784994a7607811fc66" Feb 18 00:59:37 crc kubenswrapper[4791]: E0218 00:59:37.304209 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3a78929b084d217f6863356a5f2feb6f084607971a13a0784994a7607811fc66\": container with ID starting with 3a78929b084d217f6863356a5f2feb6f084607971a13a0784994a7607811fc66 not found: ID does not exist" containerID="3a78929b084d217f6863356a5f2feb6f084607971a13a0784994a7607811fc66" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.304246 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3a78929b084d217f6863356a5f2feb6f084607971a13a0784994a7607811fc66"} err="failed to get container status \"3a78929b084d217f6863356a5f2feb6f084607971a13a0784994a7607811fc66\": rpc error: code = NotFound desc = could not find container \"3a78929b084d217f6863356a5f2feb6f084607971a13a0784994a7607811fc66\": container with ID starting with 3a78929b084d217f6863356a5f2feb6f084607971a13a0784994a7607811fc66 not found: ID does not exist" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.304271 4791 scope.go:117] "RemoveContainer" containerID="6d5f413397c33456907645fdf5056da7a7acef430cbe138126a867f77095e019" Feb 18 00:59:37 crc kubenswrapper[4791]: E0218 00:59:37.305137 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6d5f413397c33456907645fdf5056da7a7acef430cbe138126a867f77095e019\": container with ID starting with 6d5f413397c33456907645fdf5056da7a7acef430cbe138126a867f77095e019 not found: ID does not exist" containerID="6d5f413397c33456907645fdf5056da7a7acef430cbe138126a867f77095e019" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.305190 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6d5f413397c33456907645fdf5056da7a7acef430cbe138126a867f77095e019"} err="failed to get container status \"6d5f413397c33456907645fdf5056da7a7acef430cbe138126a867f77095e019\": rpc error: code = NotFound desc = could not find container \"6d5f413397c33456907645fdf5056da7a7acef430cbe138126a867f77095e019\": container with ID starting with 6d5f413397c33456907645fdf5056da7a7acef430cbe138126a867f77095e019 not found: ID does not exist" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.423830 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Feb 18 00:59:37 crc 
kubenswrapper[4791]: I0218 00:59:37.431869 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.441939 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Feb 18 00:59:37 crc kubenswrapper[4791]: E0218 00:59:37.442493 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07a4538d-b54c-45b5-8455-5d7d0a0144b8" containerName="nova-api-api" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.442519 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="07a4538d-b54c-45b5-8455-5d7d0a0144b8" containerName="nova-api-api" Feb 18 00:59:37 crc kubenswrapper[4791]: E0218 00:59:37.442557 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07a4538d-b54c-45b5-8455-5d7d0a0144b8" containerName="nova-api-log" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.442566 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="07a4538d-b54c-45b5-8455-5d7d0a0144b8" containerName="nova-api-log" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.442784 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="07a4538d-b54c-45b5-8455-5d7d0a0144b8" containerName="nova-api-api" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.442811 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="07a4538d-b54c-45b5-8455-5d7d0a0144b8" containerName="nova-api-log" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.443987 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.446802 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.446975 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.447085 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.481557 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.504817 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/041339eb-ee1d-48bb-9e31-c821067b5e04-config-data\") pod \"nova-api-0\" (UID: \"041339eb-ee1d-48bb-9e31-c821067b5e04\") " pod="openstack/nova-api-0" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.504869 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/041339eb-ee1d-48bb-9e31-c821067b5e04-internal-tls-certs\") pod \"nova-api-0\" (UID: \"041339eb-ee1d-48bb-9e31-c821067b5e04\") " pod="openstack/nova-api-0" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.504994 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/041339eb-ee1d-48bb-9e31-c821067b5e04-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"041339eb-ee1d-48bb-9e31-c821067b5e04\") " pod="openstack/nova-api-0" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.505012 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/041339eb-ee1d-48bb-9e31-c821067b5e04-public-tls-certs\") pod \"nova-api-0\" (UID: \"041339eb-ee1d-48bb-9e31-c821067b5e04\") " pod="openstack/nova-api-0" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.505038 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gjfgq\" (UniqueName: \"kubernetes.io/projected/041339eb-ee1d-48bb-9e31-c821067b5e04-kube-api-access-gjfgq\") pod \"nova-api-0\" (UID: \"041339eb-ee1d-48bb-9e31-c821067b5e04\") " pod="openstack/nova-api-0" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.505068 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/041339eb-ee1d-48bb-9e31-c821067b5e04-logs\") pod \"nova-api-0\" (UID: \"041339eb-ee1d-48bb-9e31-c821067b5e04\") " pod="openstack/nova-api-0" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.605925 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/041339eb-ee1d-48bb-9e31-c821067b5e04-config-data\") pod \"nova-api-0\" (UID: \"041339eb-ee1d-48bb-9e31-c821067b5e04\") " pod="openstack/nova-api-0" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.605981 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/041339eb-ee1d-48bb-9e31-c821067b5e04-internal-tls-certs\") pod \"nova-api-0\" (UID: \"041339eb-ee1d-48bb-9e31-c821067b5e04\") " pod="openstack/nova-api-0" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.606092 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/041339eb-ee1d-48bb-9e31-c821067b5e04-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"041339eb-ee1d-48bb-9e31-c821067b5e04\") " pod="openstack/nova-api-0" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.606108 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/041339eb-ee1d-48bb-9e31-c821067b5e04-public-tls-certs\") pod \"nova-api-0\" (UID: \"041339eb-ee1d-48bb-9e31-c821067b5e04\") " pod="openstack/nova-api-0" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.606133 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gjfgq\" (UniqueName: \"kubernetes.io/projected/041339eb-ee1d-48bb-9e31-c821067b5e04-kube-api-access-gjfgq\") pod \"nova-api-0\" (UID: \"041339eb-ee1d-48bb-9e31-c821067b5e04\") " pod="openstack/nova-api-0" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.606206 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/041339eb-ee1d-48bb-9e31-c821067b5e04-logs\") pod \"nova-api-0\" (UID: \"041339eb-ee1d-48bb-9e31-c821067b5e04\") " pod="openstack/nova-api-0" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.606651 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/041339eb-ee1d-48bb-9e31-c821067b5e04-logs\") pod \"nova-api-0\" (UID: \"041339eb-ee1d-48bb-9e31-c821067b5e04\") " pod="openstack/nova-api-0" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.610998 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/041339eb-ee1d-48bb-9e31-c821067b5e04-internal-tls-certs\") pod \"nova-api-0\" (UID: \"041339eb-ee1d-48bb-9e31-c821067b5e04\") " pod="openstack/nova-api-0" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.611062 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/041339eb-ee1d-48bb-9e31-c821067b5e04-config-data\") pod \"nova-api-0\" (UID: \"041339eb-ee1d-48bb-9e31-c821067b5e04\") " pod="openstack/nova-api-0" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.611824 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/041339eb-ee1d-48bb-9e31-c821067b5e04-public-tls-certs\") pod \"nova-api-0\" (UID: \"041339eb-ee1d-48bb-9e31-c821067b5e04\") " pod="openstack/nova-api-0" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.613058 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/041339eb-ee1d-48bb-9e31-c821067b5e04-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"041339eb-ee1d-48bb-9e31-c821067b5e04\") " pod="openstack/nova-api-0" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.628683 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gjfgq\" (UniqueName: \"kubernetes.io/projected/041339eb-ee1d-48bb-9e31-c821067b5e04-kube-api-access-gjfgq\") pod \"nova-api-0\" (UID: \"041339eb-ee1d-48bb-9e31-c821067b5e04\") " pod="openstack/nova-api-0" Feb 18 00:59:37 crc kubenswrapper[4791]: I0218 00:59:37.764556 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Feb 18 00:59:38 crc kubenswrapper[4791]: I0218 00:59:38.156919 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7486cd5a-bf0d-4162-95ff-505011bcbda3","Type":"ContainerStarted","Data":"15145d3564188230ea41d711877db781eb5e2dd0457d51bfb973753de894ac0b"} Feb 18 00:59:38 crc kubenswrapper[4791]: I0218 00:59:38.262572 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Feb 18 00:59:38 crc kubenswrapper[4791]: I0218 00:59:38.262631 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Feb 18 00:59:38 crc kubenswrapper[4791]: I0218 00:59:38.302652 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Feb 18 00:59:38 crc kubenswrapper[4791]: I0218 00:59:38.331610 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Feb 18 00:59:38 crc kubenswrapper[4791]: W0218 00:59:38.428496 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod041339eb_ee1d_48bb_9e31_c821067b5e04.slice/crio-beab38b6d7e2ea168882983d74c3488d0557c6d091316224275d5357b13c5af0 WatchSource:0}: Error finding container beab38b6d7e2ea168882983d74c3488d0557c6d091316224275d5357b13c5af0: Status 404 returned error can't find the container with id beab38b6d7e2ea168882983d74c3488d0557c6d091316224275d5357b13c5af0 Feb 18 00:59:38 crc kubenswrapper[4791]: I0218 00:59:38.429322 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Feb 18 00:59:39 crc kubenswrapper[4791]: I0218 00:59:39.073130 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod 
volumes dir" podUID="07a4538d-b54c-45b5-8455-5d7d0a0144b8" path="/var/lib/kubelet/pods/07a4538d-b54c-45b5-8455-5d7d0a0144b8/volumes" Feb 18 00:59:39 crc kubenswrapper[4791]: I0218 00:59:39.169386 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7486cd5a-bf0d-4162-95ff-505011bcbda3","Type":"ContainerStarted","Data":"48f24dd266741d954f6136b6dbf7292affff9b6f8cc7be3be601e1e1c0ed7021"} Feb 18 00:59:39 crc kubenswrapper[4791]: I0218 00:59:39.169543 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7486cd5a-bf0d-4162-95ff-505011bcbda3" containerName="ceilometer-central-agent" containerID="cri-o://6c174fdf234fd334ffe9a6f553d9b44d2b8e85e1dd65161a78424566c3ebfea2" gracePeriod=30 Feb 18 00:59:39 crc kubenswrapper[4791]: I0218 00:59:39.171870 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Feb 18 00:59:39 crc kubenswrapper[4791]: I0218 00:59:39.172891 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7486cd5a-bf0d-4162-95ff-505011bcbda3" containerName="proxy-httpd" containerID="cri-o://48f24dd266741d954f6136b6dbf7292affff9b6f8cc7be3be601e1e1c0ed7021" gracePeriod=30 Feb 18 00:59:39 crc kubenswrapper[4791]: I0218 00:59:39.172983 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7486cd5a-bf0d-4162-95ff-505011bcbda3" containerName="sg-core" containerID="cri-o://15145d3564188230ea41d711877db781eb5e2dd0457d51bfb973753de894ac0b" gracePeriod=30 Feb 18 00:59:39 crc kubenswrapper[4791]: I0218 00:59:39.173024 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7486cd5a-bf0d-4162-95ff-505011bcbda3" containerName="ceilometer-notification-agent" containerID="cri-o://0efa0e6358b07f086c2ce4083ee352f0b20c59f1d6237cab0949400ad5cd0917" gracePeriod=30 Feb 18 00:59:39 crc kubenswrapper[4791]: I0218 00:59:39.184309 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"041339eb-ee1d-48bb-9e31-c821067b5e04","Type":"ContainerStarted","Data":"006c9f4c55e116d52b069016a052d9f9ad4c2609d35b2373212532926de7992e"} Feb 18 00:59:39 crc kubenswrapper[4791]: I0218 00:59:39.184348 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"041339eb-ee1d-48bb-9e31-c821067b5e04","Type":"ContainerStarted","Data":"efc4eb2a2d94360d2be7a3ced97c6671bf8d9dca98c611bb6ff954c7a5b5ec75"} Feb 18 00:59:39 crc kubenswrapper[4791]: I0218 00:59:39.184360 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"041339eb-ee1d-48bb-9e31-c821067b5e04","Type":"ContainerStarted","Data":"beab38b6d7e2ea168882983d74c3488d0557c6d091316224275d5357b13c5af0"} Feb 18 00:59:39 crc kubenswrapper[4791]: I0218 00:59:39.210063 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.4369626580000001 podStartE2EDuration="5.210028816s" podCreationTimestamp="2026-02-18 00:59:34 +0000 UTC" firstStartedPulling="2026-02-18 00:59:35.032169228 +0000 UTC m=+1516.600182398" lastFinishedPulling="2026-02-18 00:59:38.805235386 +0000 UTC m=+1520.373248556" observedRunningTime="2026-02-18 00:59:39.192396391 +0000 UTC m=+1520.760409561" watchObservedRunningTime="2026-02-18 00:59:39.210028816 +0000 UTC m=+1520.778041976" Feb 18 00:59:39 crc kubenswrapper[4791]: I0218 
00:59:39.211523 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Feb 18 00:59:39 crc kubenswrapper[4791]: I0218 00:59:39.228304 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.22828021 podStartE2EDuration="2.22828021s" podCreationTimestamp="2026-02-18 00:59:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:59:39.214457873 +0000 UTC m=+1520.782471043" watchObservedRunningTime="2026-02-18 00:59:39.22828021 +0000 UTC m=+1520.796293390" Feb 18 00:59:39 crc kubenswrapper[4791]: I0218 00:59:39.279369 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="006d44aa-45d0-4ebb-9b58-18246020566e" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.1.2:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Feb 18 00:59:39 crc kubenswrapper[4791]: I0218 00:59:39.279651 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="006d44aa-45d0-4ebb-9b58-18246020566e" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.1.2:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Feb 18 00:59:39 crc kubenswrapper[4791]: I0218 00:59:39.441564 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-4r4tl"] Feb 18 00:59:39 crc kubenswrapper[4791]: I0218 00:59:39.443323 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-4r4tl" Feb 18 00:59:39 crc kubenswrapper[4791]: I0218 00:59:39.448096 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Feb 18 00:59:39 crc kubenswrapper[4791]: I0218 00:59:39.448878 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Feb 18 00:59:39 crc kubenswrapper[4791]: I0218 00:59:39.458352 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-4r4tl"] Feb 18 00:59:39 crc kubenswrapper[4791]: I0218 00:59:39.574261 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-966mt\" (UniqueName: \"kubernetes.io/projected/c2b1a453-7c2e-47bd-8c65-c0d6e1939741-kube-api-access-966mt\") pod \"nova-cell1-cell-mapping-4r4tl\" (UID: \"c2b1a453-7c2e-47bd-8c65-c0d6e1939741\") " pod="openstack/nova-cell1-cell-mapping-4r4tl" Feb 18 00:59:39 crc kubenswrapper[4791]: I0218 00:59:39.574385 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2b1a453-7c2e-47bd-8c65-c0d6e1939741-scripts\") pod \"nova-cell1-cell-mapping-4r4tl\" (UID: \"c2b1a453-7c2e-47bd-8c65-c0d6e1939741\") " pod="openstack/nova-cell1-cell-mapping-4r4tl" Feb 18 00:59:39 crc kubenswrapper[4791]: I0218 00:59:39.574745 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2b1a453-7c2e-47bd-8c65-c0d6e1939741-config-data\") pod \"nova-cell1-cell-mapping-4r4tl\" (UID: \"c2b1a453-7c2e-47bd-8c65-c0d6e1939741\") " pod="openstack/nova-cell1-cell-mapping-4r4tl" Feb 18 00:59:39 crc kubenswrapper[4791]: I0218 
00:59:39.574868 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2b1a453-7c2e-47bd-8c65-c0d6e1939741-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-4r4tl\" (UID: \"c2b1a453-7c2e-47bd-8c65-c0d6e1939741\") " pod="openstack/nova-cell1-cell-mapping-4r4tl" Feb 18 00:59:39 crc kubenswrapper[4791]: I0218 00:59:39.677479 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2b1a453-7c2e-47bd-8c65-c0d6e1939741-config-data\") pod \"nova-cell1-cell-mapping-4r4tl\" (UID: \"c2b1a453-7c2e-47bd-8c65-c0d6e1939741\") " pod="openstack/nova-cell1-cell-mapping-4r4tl" Feb 18 00:59:39 crc kubenswrapper[4791]: I0218 00:59:39.677872 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2b1a453-7c2e-47bd-8c65-c0d6e1939741-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-4r4tl\" (UID: \"c2b1a453-7c2e-47bd-8c65-c0d6e1939741\") " pod="openstack/nova-cell1-cell-mapping-4r4tl" Feb 18 00:59:39 crc kubenswrapper[4791]: I0218 00:59:39.678034 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-966mt\" (UniqueName: \"kubernetes.io/projected/c2b1a453-7c2e-47bd-8c65-c0d6e1939741-kube-api-access-966mt\") pod \"nova-cell1-cell-mapping-4r4tl\" (UID: \"c2b1a453-7c2e-47bd-8c65-c0d6e1939741\") " pod="openstack/nova-cell1-cell-mapping-4r4tl" Feb 18 00:59:39 crc kubenswrapper[4791]: I0218 00:59:39.678260 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2b1a453-7c2e-47bd-8c65-c0d6e1939741-scripts\") pod \"nova-cell1-cell-mapping-4r4tl\" (UID: \"c2b1a453-7c2e-47bd-8c65-c0d6e1939741\") " pod="openstack/nova-cell1-cell-mapping-4r4tl" Feb 18 00:59:39 crc kubenswrapper[4791]: I0218 00:59:39.683450 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2b1a453-7c2e-47bd-8c65-c0d6e1939741-scripts\") pod \"nova-cell1-cell-mapping-4r4tl\" (UID: \"c2b1a453-7c2e-47bd-8c65-c0d6e1939741\") " pod="openstack/nova-cell1-cell-mapping-4r4tl" Feb 18 00:59:39 crc kubenswrapper[4791]: I0218 00:59:39.683507 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2b1a453-7c2e-47bd-8c65-c0d6e1939741-config-data\") pod \"nova-cell1-cell-mapping-4r4tl\" (UID: \"c2b1a453-7c2e-47bd-8c65-c0d6e1939741\") " pod="openstack/nova-cell1-cell-mapping-4r4tl" Feb 18 00:59:39 crc kubenswrapper[4791]: I0218 00:59:39.687940 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2b1a453-7c2e-47bd-8c65-c0d6e1939741-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-4r4tl\" (UID: \"c2b1a453-7c2e-47bd-8c65-c0d6e1939741\") " pod="openstack/nova-cell1-cell-mapping-4r4tl" Feb 18 00:59:39 crc kubenswrapper[4791]: I0218 00:59:39.695946 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-966mt\" (UniqueName: \"kubernetes.io/projected/c2b1a453-7c2e-47bd-8c65-c0d6e1939741-kube-api-access-966mt\") pod \"nova-cell1-cell-mapping-4r4tl\" (UID: \"c2b1a453-7c2e-47bd-8c65-c0d6e1939741\") " pod="openstack/nova-cell1-cell-mapping-4r4tl" Feb 18 00:59:39 crc kubenswrapper[4791]: I0218 00:59:39.792943 4791 util.go:30] "No sandbox for pod can 
be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-4r4tl" Feb 18 00:59:40 crc kubenswrapper[4791]: I0218 00:59:40.218668 4791 generic.go:334] "Generic (PLEG): container finished" podID="7486cd5a-bf0d-4162-95ff-505011bcbda3" containerID="15145d3564188230ea41d711877db781eb5e2dd0457d51bfb973753de894ac0b" exitCode=2 Feb 18 00:59:40 crc kubenswrapper[4791]: I0218 00:59:40.219170 4791 generic.go:334] "Generic (PLEG): container finished" podID="7486cd5a-bf0d-4162-95ff-505011bcbda3" containerID="0efa0e6358b07f086c2ce4083ee352f0b20c59f1d6237cab0949400ad5cd0917" exitCode=0 Feb 18 00:59:40 crc kubenswrapper[4791]: I0218 00:59:40.218877 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7486cd5a-bf0d-4162-95ff-505011bcbda3","Type":"ContainerDied","Data":"15145d3564188230ea41d711877db781eb5e2dd0457d51bfb973753de894ac0b"} Feb 18 00:59:40 crc kubenswrapper[4791]: I0218 00:59:40.219746 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7486cd5a-bf0d-4162-95ff-505011bcbda3","Type":"ContainerDied","Data":"0efa0e6358b07f086c2ce4083ee352f0b20c59f1d6237cab0949400ad5cd0917"} Feb 18 00:59:40 crc kubenswrapper[4791]: I0218 00:59:40.285090 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-4r4tl"] Feb 18 00:59:40 crc kubenswrapper[4791]: I0218 00:59:40.424340 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-79b5d74c8c-b7s2x" Feb 18 00:59:40 crc kubenswrapper[4791]: I0218 00:59:40.489930 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-qnm85"] Feb 18 00:59:40 crc kubenswrapper[4791]: I0218 00:59:40.490182 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5fbc4d444f-qnm85" podUID="78f72e20-5be7-448d-b18c-390ad193f0ea" containerName="dnsmasq-dns" containerID="cri-o://4c488e324c9cbcda163c2984fd26aadbfbb9154229e641d94b7d61f46a40cd5d" gracePeriod=10 Feb 18 00:59:41 crc kubenswrapper[4791]: I0218 00:59:41.217044 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5fbc4d444f-qnm85" Feb 18 00:59:41 crc kubenswrapper[4791]: I0218 00:59:41.232197 4791 generic.go:334] "Generic (PLEG): container finished" podID="78f72e20-5be7-448d-b18c-390ad193f0ea" containerID="4c488e324c9cbcda163c2984fd26aadbfbb9154229e641d94b7d61f46a40cd5d" exitCode=0 Feb 18 00:59:41 crc kubenswrapper[4791]: I0218 00:59:41.232262 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-qnm85" event={"ID":"78f72e20-5be7-448d-b18c-390ad193f0ea","Type":"ContainerDied","Data":"4c488e324c9cbcda163c2984fd26aadbfbb9154229e641d94b7d61f46a40cd5d"} Feb 18 00:59:41 crc kubenswrapper[4791]: I0218 00:59:41.232294 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-qnm85" event={"ID":"78f72e20-5be7-448d-b18c-390ad193f0ea","Type":"ContainerDied","Data":"9ea8af2c253e5e4ea04e52ef59cca000bcafb0085f56c314ba0c0b535cb6757d"} Feb 18 00:59:41 crc kubenswrapper[4791]: I0218 00:59:41.232310 4791 scope.go:117] "RemoveContainer" containerID="4c488e324c9cbcda163c2984fd26aadbfbb9154229e641d94b7d61f46a40cd5d" Feb 18 00:59:41 crc kubenswrapper[4791]: I0218 00:59:41.232387 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5fbc4d444f-qnm85" Feb 18 00:59:41 crc kubenswrapper[4791]: I0218 00:59:41.234482 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-4r4tl" event={"ID":"c2b1a453-7c2e-47bd-8c65-c0d6e1939741","Type":"ContainerStarted","Data":"db7096f2287b70612c4065644639e11d95c5e61ee0123c974484d99a30eb398d"} Feb 18 00:59:41 crc kubenswrapper[4791]: I0218 00:59:41.234507 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-4r4tl" event={"ID":"c2b1a453-7c2e-47bd-8c65-c0d6e1939741","Type":"ContainerStarted","Data":"a7550e76110474cceebf31a580b4d8f67e51fe956583a0e41e74ddc081e6c670"} Feb 18 00:59:41 crc kubenswrapper[4791]: I0218 00:59:41.273995 4791 scope.go:117] "RemoveContainer" containerID="a234656e00b70a0a32e4bfe9975e92cbd0efe68de6556f1c6cec0b8dc167b0f0" Feb 18 00:59:41 crc kubenswrapper[4791]: I0218 00:59:41.277142 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-4r4tl" podStartSLOduration=2.277076728 podStartE2EDuration="2.277076728s" podCreationTimestamp="2026-02-18 00:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:59:41.272382573 +0000 UTC m=+1522.840395743" watchObservedRunningTime="2026-02-18 00:59:41.277076728 +0000 UTC m=+1522.845089898" Feb 18 00:59:41 crc kubenswrapper[4791]: I0218 00:59:41.307685 4791 scope.go:117] "RemoveContainer" containerID="4c488e324c9cbcda163c2984fd26aadbfbb9154229e641d94b7d61f46a40cd5d" Feb 18 00:59:41 crc kubenswrapper[4791]: E0218 00:59:41.308646 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c488e324c9cbcda163c2984fd26aadbfbb9154229e641d94b7d61f46a40cd5d\": container with ID starting with 4c488e324c9cbcda163c2984fd26aadbfbb9154229e641d94b7d61f46a40cd5d not found: ID does not exist" containerID="4c488e324c9cbcda163c2984fd26aadbfbb9154229e641d94b7d61f46a40cd5d" Feb 18 00:59:41 crc kubenswrapper[4791]: I0218 00:59:41.308674 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c488e324c9cbcda163c2984fd26aadbfbb9154229e641d94b7d61f46a40cd5d"} err="failed to get container status \"4c488e324c9cbcda163c2984fd26aadbfbb9154229e641d94b7d61f46a40cd5d\": rpc error: code = NotFound desc = could not find container \"4c488e324c9cbcda163c2984fd26aadbfbb9154229e641d94b7d61f46a40cd5d\": container with ID starting with 4c488e324c9cbcda163c2984fd26aadbfbb9154229e641d94b7d61f46a40cd5d not found: ID does not exist" Feb 18 00:59:41 crc kubenswrapper[4791]: I0218 00:59:41.308693 4791 scope.go:117] "RemoveContainer" containerID="a234656e00b70a0a32e4bfe9975e92cbd0efe68de6556f1c6cec0b8dc167b0f0" Feb 18 00:59:41 crc kubenswrapper[4791]: E0218 00:59:41.308925 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a234656e00b70a0a32e4bfe9975e92cbd0efe68de6556f1c6cec0b8dc167b0f0\": container with ID starting with a234656e00b70a0a32e4bfe9975e92cbd0efe68de6556f1c6cec0b8dc167b0f0 not found: ID does not exist" containerID="a234656e00b70a0a32e4bfe9975e92cbd0efe68de6556f1c6cec0b8dc167b0f0" Feb 18 00:59:41 crc kubenswrapper[4791]: I0218 00:59:41.308940 4791 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"a234656e00b70a0a32e4bfe9975e92cbd0efe68de6556f1c6cec0b8dc167b0f0"} err="failed to get container status \"a234656e00b70a0a32e4bfe9975e92cbd0efe68de6556f1c6cec0b8dc167b0f0\": rpc error: code = NotFound desc = could not find container \"a234656e00b70a0a32e4bfe9975e92cbd0efe68de6556f1c6cec0b8dc167b0f0\": container with ID starting with a234656e00b70a0a32e4bfe9975e92cbd0efe68de6556f1c6cec0b8dc167b0f0 not found: ID does not exist" Feb 18 00:59:41 crc kubenswrapper[4791]: I0218 00:59:41.325028 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-knbqq\" (UniqueName: \"kubernetes.io/projected/78f72e20-5be7-448d-b18c-390ad193f0ea-kube-api-access-knbqq\") pod \"78f72e20-5be7-448d-b18c-390ad193f0ea\" (UID: \"78f72e20-5be7-448d-b18c-390ad193f0ea\") " Feb 18 00:59:41 crc kubenswrapper[4791]: I0218 00:59:41.325143 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/78f72e20-5be7-448d-b18c-390ad193f0ea-ovsdbserver-sb\") pod \"78f72e20-5be7-448d-b18c-390ad193f0ea\" (UID: \"78f72e20-5be7-448d-b18c-390ad193f0ea\") " Feb 18 00:59:41 crc kubenswrapper[4791]: I0218 00:59:41.325304 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/78f72e20-5be7-448d-b18c-390ad193f0ea-config\") pod \"78f72e20-5be7-448d-b18c-390ad193f0ea\" (UID: \"78f72e20-5be7-448d-b18c-390ad193f0ea\") " Feb 18 00:59:41 crc kubenswrapper[4791]: I0218 00:59:41.325323 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/78f72e20-5be7-448d-b18c-390ad193f0ea-dns-swift-storage-0\") pod \"78f72e20-5be7-448d-b18c-390ad193f0ea\" (UID: \"78f72e20-5be7-448d-b18c-390ad193f0ea\") " Feb 18 00:59:41 crc kubenswrapper[4791]: I0218 00:59:41.325386 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/78f72e20-5be7-448d-b18c-390ad193f0ea-dns-svc\") pod \"78f72e20-5be7-448d-b18c-390ad193f0ea\" (UID: \"78f72e20-5be7-448d-b18c-390ad193f0ea\") " Feb 18 00:59:41 crc kubenswrapper[4791]: I0218 00:59:41.325411 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/78f72e20-5be7-448d-b18c-390ad193f0ea-ovsdbserver-nb\") pod \"78f72e20-5be7-448d-b18c-390ad193f0ea\" (UID: \"78f72e20-5be7-448d-b18c-390ad193f0ea\") " Feb 18 00:59:41 crc kubenswrapper[4791]: I0218 00:59:41.337925 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/78f72e20-5be7-448d-b18c-390ad193f0ea-kube-api-access-knbqq" (OuterVolumeSpecName: "kube-api-access-knbqq") pod "78f72e20-5be7-448d-b18c-390ad193f0ea" (UID: "78f72e20-5be7-448d-b18c-390ad193f0ea"). InnerVolumeSpecName "kube-api-access-knbqq". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:59:41 crc kubenswrapper[4791]: I0218 00:59:41.392794 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78f72e20-5be7-448d-b18c-390ad193f0ea-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "78f72e20-5be7-448d-b18c-390ad193f0ea" (UID: "78f72e20-5be7-448d-b18c-390ad193f0ea"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:59:41 crc kubenswrapper[4791]: I0218 00:59:41.397698 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78f72e20-5be7-448d-b18c-390ad193f0ea-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "78f72e20-5be7-448d-b18c-390ad193f0ea" (UID: "78f72e20-5be7-448d-b18c-390ad193f0ea"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:59:41 crc kubenswrapper[4791]: I0218 00:59:41.400561 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78f72e20-5be7-448d-b18c-390ad193f0ea-config" (OuterVolumeSpecName: "config") pod "78f72e20-5be7-448d-b18c-390ad193f0ea" (UID: "78f72e20-5be7-448d-b18c-390ad193f0ea"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:59:41 crc kubenswrapper[4791]: I0218 00:59:41.413972 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78f72e20-5be7-448d-b18c-390ad193f0ea-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "78f72e20-5be7-448d-b18c-390ad193f0ea" (UID: "78f72e20-5be7-448d-b18c-390ad193f0ea"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:59:41 crc kubenswrapper[4791]: I0218 00:59:41.429265 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/78f72e20-5be7-448d-b18c-390ad193f0ea-config\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:41 crc kubenswrapper[4791]: I0218 00:59:41.429295 4791 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/78f72e20-5be7-448d-b18c-390ad193f0ea-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:41 crc kubenswrapper[4791]: I0218 00:59:41.429305 4791 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/78f72e20-5be7-448d-b18c-390ad193f0ea-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:41 crc kubenswrapper[4791]: I0218 00:59:41.429317 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-knbqq\" (UniqueName: \"kubernetes.io/projected/78f72e20-5be7-448d-b18c-390ad193f0ea-kube-api-access-knbqq\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:41 crc kubenswrapper[4791]: I0218 00:59:41.429325 4791 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/78f72e20-5be7-448d-b18c-390ad193f0ea-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:41 crc kubenswrapper[4791]: I0218 00:59:41.446692 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78f72e20-5be7-448d-b18c-390ad193f0ea-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "78f72e20-5be7-448d-b18c-390ad193f0ea" (UID: "78f72e20-5be7-448d-b18c-390ad193f0ea"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 00:59:41 crc kubenswrapper[4791]: I0218 00:59:41.532603 4791 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/78f72e20-5be7-448d-b18c-390ad193f0ea-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:41 crc kubenswrapper[4791]: I0218 00:59:41.590102 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-qnm85"] Feb 18 00:59:41 crc kubenswrapper[4791]: I0218 00:59:41.600747 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-qnm85"] Feb 18 00:59:43 crc kubenswrapper[4791]: I0218 00:59:43.076297 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="78f72e20-5be7-448d-b18c-390ad193f0ea" path="/var/lib/kubelet/pods/78f72e20-5be7-448d-b18c-390ad193f0ea/volumes" Feb 18 00:59:43 crc kubenswrapper[4791]: I0218 00:59:43.261699 4791 generic.go:334] "Generic (PLEG): container finished" podID="7486cd5a-bf0d-4162-95ff-505011bcbda3" containerID="6c174fdf234fd334ffe9a6f553d9b44d2b8e85e1dd65161a78424566c3ebfea2" exitCode=0 Feb 18 00:59:43 crc kubenswrapper[4791]: I0218 00:59:43.261810 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7486cd5a-bf0d-4162-95ff-505011bcbda3","Type":"ContainerDied","Data":"6c174fdf234fd334ffe9a6f553d9b44d2b8e85e1dd65161a78424566c3ebfea2"} Feb 18 00:59:46 crc kubenswrapper[4791]: I0218 00:59:46.309045 4791 generic.go:334] "Generic (PLEG): container finished" podID="c2b1a453-7c2e-47bd-8c65-c0d6e1939741" containerID="db7096f2287b70612c4065644639e11d95c5e61ee0123c974484d99a30eb398d" exitCode=0 Feb 18 00:59:46 crc kubenswrapper[4791]: I0218 00:59:46.309096 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-4r4tl" event={"ID":"c2b1a453-7c2e-47bd-8c65-c0d6e1939741","Type":"ContainerDied","Data":"db7096f2287b70612c4065644639e11d95c5e61ee0123c974484d99a30eb398d"} Feb 18 00:59:47 crc kubenswrapper[4791]: I0218 00:59:47.765496 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Feb 18 00:59:47 crc kubenswrapper[4791]: I0218 00:59:47.766252 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Feb 18 00:59:47 crc kubenswrapper[4791]: I0218 00:59:47.893011 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-4r4tl" Feb 18 00:59:47 crc kubenswrapper[4791]: I0218 00:59:47.902284 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2b1a453-7c2e-47bd-8c65-c0d6e1939741-config-data\") pod \"c2b1a453-7c2e-47bd-8c65-c0d6e1939741\" (UID: \"c2b1a453-7c2e-47bd-8c65-c0d6e1939741\") " Feb 18 00:59:47 crc kubenswrapper[4791]: I0218 00:59:47.902399 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-966mt\" (UniqueName: \"kubernetes.io/projected/c2b1a453-7c2e-47bd-8c65-c0d6e1939741-kube-api-access-966mt\") pod \"c2b1a453-7c2e-47bd-8c65-c0d6e1939741\" (UID: \"c2b1a453-7c2e-47bd-8c65-c0d6e1939741\") " Feb 18 00:59:47 crc kubenswrapper[4791]: I0218 00:59:47.902462 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2b1a453-7c2e-47bd-8c65-c0d6e1939741-scripts\") pod \"c2b1a453-7c2e-47bd-8c65-c0d6e1939741\" (UID: \"c2b1a453-7c2e-47bd-8c65-c0d6e1939741\") " Feb 18 00:59:47 crc kubenswrapper[4791]: I0218 00:59:47.902482 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2b1a453-7c2e-47bd-8c65-c0d6e1939741-combined-ca-bundle\") pod \"c2b1a453-7c2e-47bd-8c65-c0d6e1939741\" (UID: \"c2b1a453-7c2e-47bd-8c65-c0d6e1939741\") " Feb 18 00:59:47 crc kubenswrapper[4791]: I0218 00:59:47.912700 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2b1a453-7c2e-47bd-8c65-c0d6e1939741-kube-api-access-966mt" (OuterVolumeSpecName: "kube-api-access-966mt") pod "c2b1a453-7c2e-47bd-8c65-c0d6e1939741" (UID: "c2b1a453-7c2e-47bd-8c65-c0d6e1939741"). InnerVolumeSpecName "kube-api-access-966mt". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:59:47 crc kubenswrapper[4791]: I0218 00:59:47.913441 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2b1a453-7c2e-47bd-8c65-c0d6e1939741-scripts" (OuterVolumeSpecName: "scripts") pod "c2b1a453-7c2e-47bd-8c65-c0d6e1939741" (UID: "c2b1a453-7c2e-47bd-8c65-c0d6e1939741"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:59:47 crc kubenswrapper[4791]: I0218 00:59:47.943335 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2b1a453-7c2e-47bd-8c65-c0d6e1939741-config-data" (OuterVolumeSpecName: "config-data") pod "c2b1a453-7c2e-47bd-8c65-c0d6e1939741" (UID: "c2b1a453-7c2e-47bd-8c65-c0d6e1939741"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:59:47 crc kubenswrapper[4791]: I0218 00:59:47.969881 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2b1a453-7c2e-47bd-8c65-c0d6e1939741-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c2b1a453-7c2e-47bd-8c65-c0d6e1939741" (UID: "c2b1a453-7c2e-47bd-8c65-c0d6e1939741"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:59:48 crc kubenswrapper[4791]: I0218 00:59:48.006302 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-966mt\" (UniqueName: \"kubernetes.io/projected/c2b1a453-7c2e-47bd-8c65-c0d6e1939741-kube-api-access-966mt\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:48 crc kubenswrapper[4791]: I0218 00:59:48.006340 4791 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2b1a453-7c2e-47bd-8c65-c0d6e1939741-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:48 crc kubenswrapper[4791]: I0218 00:59:48.006352 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2b1a453-7c2e-47bd-8c65-c0d6e1939741-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:48 crc kubenswrapper[4791]: I0218 00:59:48.006363 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2b1a453-7c2e-47bd-8c65-c0d6e1939741-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:48 crc kubenswrapper[4791]: I0218 00:59:48.267735 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Feb 18 00:59:48 crc kubenswrapper[4791]: I0218 00:59:48.269037 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Feb 18 00:59:48 crc kubenswrapper[4791]: I0218 00:59:48.272697 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Feb 18 00:59:48 crc kubenswrapper[4791]: I0218 00:59:48.337531 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-4r4tl" Feb 18 00:59:48 crc kubenswrapper[4791]: I0218 00:59:48.346221 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-4r4tl" event={"ID":"c2b1a453-7c2e-47bd-8c65-c0d6e1939741","Type":"ContainerDied","Data":"a7550e76110474cceebf31a580b4d8f67e51fe956583a0e41e74ddc081e6c670"} Feb 18 00:59:48 crc kubenswrapper[4791]: I0218 00:59:48.346251 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a7550e76110474cceebf31a580b4d8f67e51fe956583a0e41e74ddc081e6c670" Feb 18 00:59:48 crc kubenswrapper[4791]: I0218 00:59:48.362358 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Feb 18 00:59:48 crc kubenswrapper[4791]: I0218 00:59:48.567553 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Feb 18 00:59:48 crc kubenswrapper[4791]: I0218 00:59:48.568148 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="041339eb-ee1d-48bb-9e31-c821067b5e04" containerName="nova-api-log" containerID="cri-o://efc4eb2a2d94360d2be7a3ced97c6671bf8d9dca98c611bb6ff954c7a5b5ec75" gracePeriod=30 Feb 18 00:59:48 crc kubenswrapper[4791]: I0218 00:59:48.568362 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="041339eb-ee1d-48bb-9e31-c821067b5e04" containerName="nova-api-api" containerID="cri-o://006c9f4c55e116d52b069016a052d9f9ad4c2609d35b2373212532926de7992e" gracePeriod=30 Feb 18 00:59:48 crc kubenswrapper[4791]: I0218 00:59:48.581102 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="041339eb-ee1d-48bb-9e31-c821067b5e04" 
containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.1.6:8774/\": EOF" Feb 18 00:59:48 crc kubenswrapper[4791]: I0218 00:59:48.581535 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="041339eb-ee1d-48bb-9e31-c821067b5e04" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.1.6:8774/\": EOF" Feb 18 00:59:48 crc kubenswrapper[4791]: I0218 00:59:48.591220 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Feb 18 00:59:48 crc kubenswrapper[4791]: I0218 00:59:48.594001 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="11c5a6bf-395f-4b92-a6a3-99176c573b6b" containerName="nova-scheduler-scheduler" containerID="cri-o://efa7654cdc48650d11538aee120a9e83332b77c1c5d229f9e615e268aacfb5b0" gracePeriod=30 Feb 18 00:59:48 crc kubenswrapper[4791]: I0218 00:59:48.608027 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Feb 18 00:59:49 crc kubenswrapper[4791]: I0218 00:59:49.351745 4791 generic.go:334] "Generic (PLEG): container finished" podID="041339eb-ee1d-48bb-9e31-c821067b5e04" containerID="efc4eb2a2d94360d2be7a3ced97c6671bf8d9dca98c611bb6ff954c7a5b5ec75" exitCode=143 Feb 18 00:59:49 crc kubenswrapper[4791]: I0218 00:59:49.352037 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"041339eb-ee1d-48bb-9e31-c821067b5e04","Type":"ContainerDied","Data":"efc4eb2a2d94360d2be7a3ced97c6671bf8d9dca98c611bb6ff954c7a5b5ec75"} Feb 18 00:59:49 crc kubenswrapper[4791]: E0218 00:59:49.765811 4791 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="efa7654cdc48650d11538aee120a9e83332b77c1c5d229f9e615e268aacfb5b0" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Feb 18 00:59:49 crc kubenswrapper[4791]: E0218 00:59:49.767525 4791 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="efa7654cdc48650d11538aee120a9e83332b77c1c5d229f9e615e268aacfb5b0" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Feb 18 00:59:49 crc kubenswrapper[4791]: E0218 00:59:49.771205 4791 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="efa7654cdc48650d11538aee120a9e83332b77c1c5d229f9e615e268aacfb5b0" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Feb 18 00:59:49 crc kubenswrapper[4791]: E0218 00:59:49.771237 4791 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="11c5a6bf-395f-4b92-a6a3-99176c573b6b" containerName="nova-scheduler-scheduler" Feb 18 00:59:50 crc kubenswrapper[4791]: I0218 00:59:50.368522 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="006d44aa-45d0-4ebb-9b58-18246020566e" containerName="nova-metadata-metadata" containerID="cri-o://e9f3faf96dd2728a0a14a172186d820c4e6068536780f59fb3a0eddf84c5dd50" gracePeriod=30 Feb 18 00:59:50 crc 
kubenswrapper[4791]: I0218 00:59:50.368547 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="006d44aa-45d0-4ebb-9b58-18246020566e" containerName="nova-metadata-log" containerID="cri-o://63b908872891102b7b77ae8f03f360b89e67e27a593ae111a436916c55dd2d55" gracePeriod=30 Feb 18 00:59:51 crc kubenswrapper[4791]: I0218 00:59:51.385301 4791 generic.go:334] "Generic (PLEG): container finished" podID="006d44aa-45d0-4ebb-9b58-18246020566e" containerID="63b908872891102b7b77ae8f03f360b89e67e27a593ae111a436916c55dd2d55" exitCode=143 Feb 18 00:59:51 crc kubenswrapper[4791]: I0218 00:59:51.385361 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"006d44aa-45d0-4ebb-9b58-18246020566e","Type":"ContainerDied","Data":"63b908872891102b7b77ae8f03f360b89e67e27a593ae111a436916c55dd2d55"} Feb 18 00:59:53 crc kubenswrapper[4791]: I0218 00:59:53.521453 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="006d44aa-45d0-4ebb-9b58-18246020566e" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.1.2:8775/\": read tcp 10.217.0.2:35948->10.217.1.2:8775: read: connection reset by peer" Feb 18 00:59:53 crc kubenswrapper[4791]: I0218 00:59:53.521700 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="006d44aa-45d0-4ebb-9b58-18246020566e" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.1.2:8775/\": read tcp 10.217.0.2:35962->10.217.1.2:8775: read: connection reset by peer" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.253169 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.271599 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.380173 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/006d44aa-45d0-4ebb-9b58-18246020566e-combined-ca-bundle\") pod \"006d44aa-45d0-4ebb-9b58-18246020566e\" (UID: \"006d44aa-45d0-4ebb-9b58-18246020566e\") " Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.380227 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/11c5a6bf-395f-4b92-a6a3-99176c573b6b-config-data\") pod \"11c5a6bf-395f-4b92-a6a3-99176c573b6b\" (UID: \"11c5a6bf-395f-4b92-a6a3-99176c573b6b\") " Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.380258 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bzscq\" (UniqueName: \"kubernetes.io/projected/11c5a6bf-395f-4b92-a6a3-99176c573b6b-kube-api-access-bzscq\") pod \"11c5a6bf-395f-4b92-a6a3-99176c573b6b\" (UID: \"11c5a6bf-395f-4b92-a6a3-99176c573b6b\") " Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.380296 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/11c5a6bf-395f-4b92-a6a3-99176c573b6b-combined-ca-bundle\") pod \"11c5a6bf-395f-4b92-a6a3-99176c573b6b\" (UID: \"11c5a6bf-395f-4b92-a6a3-99176c573b6b\") " Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.380496 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jbp58\" (UniqueName: \"kubernetes.io/projected/006d44aa-45d0-4ebb-9b58-18246020566e-kube-api-access-jbp58\") pod \"006d44aa-45d0-4ebb-9b58-18246020566e\" (UID: \"006d44aa-45d0-4ebb-9b58-18246020566e\") " Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.380537 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/006d44aa-45d0-4ebb-9b58-18246020566e-nova-metadata-tls-certs\") pod \"006d44aa-45d0-4ebb-9b58-18246020566e\" (UID: \"006d44aa-45d0-4ebb-9b58-18246020566e\") " Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.380576 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/006d44aa-45d0-4ebb-9b58-18246020566e-config-data\") pod \"006d44aa-45d0-4ebb-9b58-18246020566e\" (UID: \"006d44aa-45d0-4ebb-9b58-18246020566e\") " Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.380718 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/006d44aa-45d0-4ebb-9b58-18246020566e-logs\") pod \"006d44aa-45d0-4ebb-9b58-18246020566e\" (UID: \"006d44aa-45d0-4ebb-9b58-18246020566e\") " Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.386277 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/11c5a6bf-395f-4b92-a6a3-99176c573b6b-kube-api-access-bzscq" (OuterVolumeSpecName: "kube-api-access-bzscq") pod "11c5a6bf-395f-4b92-a6a3-99176c573b6b" (UID: "11c5a6bf-395f-4b92-a6a3-99176c573b6b"). InnerVolumeSpecName "kube-api-access-bzscq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.387233 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/006d44aa-45d0-4ebb-9b58-18246020566e-kube-api-access-jbp58" (OuterVolumeSpecName: "kube-api-access-jbp58") pod "006d44aa-45d0-4ebb-9b58-18246020566e" (UID: "006d44aa-45d0-4ebb-9b58-18246020566e"). InnerVolumeSpecName "kube-api-access-jbp58". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.388647 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/006d44aa-45d0-4ebb-9b58-18246020566e-logs" (OuterVolumeSpecName: "logs") pod "006d44aa-45d0-4ebb-9b58-18246020566e" (UID: "006d44aa-45d0-4ebb-9b58-18246020566e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.421322 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/006d44aa-45d0-4ebb-9b58-18246020566e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "006d44aa-45d0-4ebb-9b58-18246020566e" (UID: "006d44aa-45d0-4ebb-9b58-18246020566e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.426414 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/006d44aa-45d0-4ebb-9b58-18246020566e-config-data" (OuterVolumeSpecName: "config-data") pod "006d44aa-45d0-4ebb-9b58-18246020566e" (UID: "006d44aa-45d0-4ebb-9b58-18246020566e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.431907 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/11c5a6bf-395f-4b92-a6a3-99176c573b6b-config-data" (OuterVolumeSpecName: "config-data") pod "11c5a6bf-395f-4b92-a6a3-99176c573b6b" (UID: "11c5a6bf-395f-4b92-a6a3-99176c573b6b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.437890 4791 generic.go:334] "Generic (PLEG): container finished" podID="11c5a6bf-395f-4b92-a6a3-99176c573b6b" containerID="efa7654cdc48650d11538aee120a9e83332b77c1c5d229f9e615e268aacfb5b0" exitCode=0 Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.437961 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"11c5a6bf-395f-4b92-a6a3-99176c573b6b","Type":"ContainerDied","Data":"efa7654cdc48650d11538aee120a9e83332b77c1c5d229f9e615e268aacfb5b0"} Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.437987 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"11c5a6bf-395f-4b92-a6a3-99176c573b6b","Type":"ContainerDied","Data":"ee16799ecf56a8530fee223be8d79d0f4c103d8a0ef1d93114f28779e938611e"} Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.438003 4791 scope.go:117] "RemoveContainer" containerID="efa7654cdc48650d11538aee120a9e83332b77c1c5d229f9e615e268aacfb5b0" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.438191 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.440611 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/11c5a6bf-395f-4b92-a6a3-99176c573b6b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "11c5a6bf-395f-4b92-a6a3-99176c573b6b" (UID: "11c5a6bf-395f-4b92-a6a3-99176c573b6b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.447860 4791 generic.go:334] "Generic (PLEG): container finished" podID="006d44aa-45d0-4ebb-9b58-18246020566e" containerID="e9f3faf96dd2728a0a14a172186d820c4e6068536780f59fb3a0eddf84c5dd50" exitCode=0 Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.447976 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"006d44aa-45d0-4ebb-9b58-18246020566e","Type":"ContainerDied","Data":"e9f3faf96dd2728a0a14a172186d820c4e6068536780f59fb3a0eddf84c5dd50"} Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.447998 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.448032 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"006d44aa-45d0-4ebb-9b58-18246020566e","Type":"ContainerDied","Data":"01a02b51c9d9747c4dc101f7d35f843caa1f70aa640b1846e040d4412bb5b64a"} Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.455480 4791 generic.go:334] "Generic (PLEG): container finished" podID="041339eb-ee1d-48bb-9e31-c821067b5e04" containerID="006c9f4c55e116d52b069016a052d9f9ad4c2609d35b2373212532926de7992e" exitCode=0 Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.455517 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"041339eb-ee1d-48bb-9e31-c821067b5e04","Type":"ContainerDied","Data":"006c9f4c55e116d52b069016a052d9f9ad4c2609d35b2373212532926de7992e"} Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.473820 4791 scope.go:117] "RemoveContainer" containerID="efa7654cdc48650d11538aee120a9e83332b77c1c5d229f9e615e268aacfb5b0" Feb 18 00:59:54 crc kubenswrapper[4791]: E0218 00:59:54.474625 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"efa7654cdc48650d11538aee120a9e83332b77c1c5d229f9e615e268aacfb5b0\": container with ID starting with efa7654cdc48650d11538aee120a9e83332b77c1c5d229f9e615e268aacfb5b0 not found: ID does not exist" containerID="efa7654cdc48650d11538aee120a9e83332b77c1c5d229f9e615e268aacfb5b0" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.474661 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"efa7654cdc48650d11538aee120a9e83332b77c1c5d229f9e615e268aacfb5b0"} err="failed to get container status \"efa7654cdc48650d11538aee120a9e83332b77c1c5d229f9e615e268aacfb5b0\": rpc error: code = NotFound desc = could not find container \"efa7654cdc48650d11538aee120a9e83332b77c1c5d229f9e615e268aacfb5b0\": container with ID starting with efa7654cdc48650d11538aee120a9e83332b77c1c5d229f9e615e268aacfb5b0 not found: ID does not exist" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.474682 4791 scope.go:117] "RemoveContainer" containerID="e9f3faf96dd2728a0a14a172186d820c4e6068536780f59fb3a0eddf84c5dd50" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 
00:59:54.480561 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/006d44aa-45d0-4ebb-9b58-18246020566e-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "006d44aa-45d0-4ebb-9b58-18246020566e" (UID: "006d44aa-45d0-4ebb-9b58-18246020566e"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.483290 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bzscq\" (UniqueName: \"kubernetes.io/projected/11c5a6bf-395f-4b92-a6a3-99176c573b6b-kube-api-access-bzscq\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.483315 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/11c5a6bf-395f-4b92-a6a3-99176c573b6b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.483328 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jbp58\" (UniqueName: \"kubernetes.io/projected/006d44aa-45d0-4ebb-9b58-18246020566e-kube-api-access-jbp58\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.483337 4791 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/006d44aa-45d0-4ebb-9b58-18246020566e-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.483346 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/006d44aa-45d0-4ebb-9b58-18246020566e-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.483355 4791 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/006d44aa-45d0-4ebb-9b58-18246020566e-logs\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.483363 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/006d44aa-45d0-4ebb-9b58-18246020566e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.483395 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/11c5a6bf-395f-4b92-a6a3-99176c573b6b-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.500508 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.501064 4791 scope.go:117] "RemoveContainer" containerID="63b908872891102b7b77ae8f03f360b89e67e27a593ae111a436916c55dd2d55" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.536868 4791 scope.go:117] "RemoveContainer" containerID="e9f3faf96dd2728a0a14a172186d820c4e6068536780f59fb3a0eddf84c5dd50" Feb 18 00:59:54 crc kubenswrapper[4791]: E0218 00:59:54.537379 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e9f3faf96dd2728a0a14a172186d820c4e6068536780f59fb3a0eddf84c5dd50\": container with ID starting with e9f3faf96dd2728a0a14a172186d820c4e6068536780f59fb3a0eddf84c5dd50 not found: ID does not exist" containerID="e9f3faf96dd2728a0a14a172186d820c4e6068536780f59fb3a0eddf84c5dd50" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.537426 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e9f3faf96dd2728a0a14a172186d820c4e6068536780f59fb3a0eddf84c5dd50"} err="failed to get container status \"e9f3faf96dd2728a0a14a172186d820c4e6068536780f59fb3a0eddf84c5dd50\": rpc error: code = NotFound desc = could not find container \"e9f3faf96dd2728a0a14a172186d820c4e6068536780f59fb3a0eddf84c5dd50\": container with ID starting with e9f3faf96dd2728a0a14a172186d820c4e6068536780f59fb3a0eddf84c5dd50 not found: ID does not exist" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.537452 4791 scope.go:117] "RemoveContainer" containerID="63b908872891102b7b77ae8f03f360b89e67e27a593ae111a436916c55dd2d55" Feb 18 00:59:54 crc kubenswrapper[4791]: E0218 00:59:54.537844 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"63b908872891102b7b77ae8f03f360b89e67e27a593ae111a436916c55dd2d55\": container with ID starting with 63b908872891102b7b77ae8f03f360b89e67e27a593ae111a436916c55dd2d55 not found: ID does not exist" containerID="63b908872891102b7b77ae8f03f360b89e67e27a593ae111a436916c55dd2d55" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.537892 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"63b908872891102b7b77ae8f03f360b89e67e27a593ae111a436916c55dd2d55"} err="failed to get container status \"63b908872891102b7b77ae8f03f360b89e67e27a593ae111a436916c55dd2d55\": rpc error: code = NotFound desc = could not find container \"63b908872891102b7b77ae8f03f360b89e67e27a593ae111a436916c55dd2d55\": container with ID starting with 63b908872891102b7b77ae8f03f360b89e67e27a593ae111a436916c55dd2d55 not found: ID does not exist" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.686766 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/041339eb-ee1d-48bb-9e31-c821067b5e04-logs\") pod \"041339eb-ee1d-48bb-9e31-c821067b5e04\" (UID: \"041339eb-ee1d-48bb-9e31-c821067b5e04\") " Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.687117 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/041339eb-ee1d-48bb-9e31-c821067b5e04-public-tls-certs\") pod \"041339eb-ee1d-48bb-9e31-c821067b5e04\" (UID: \"041339eb-ee1d-48bb-9e31-c821067b5e04\") " Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.687238 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/041339eb-ee1d-48bb-9e31-c821067b5e04-internal-tls-certs\") pod \"041339eb-ee1d-48bb-9e31-c821067b5e04\" (UID: \"041339eb-ee1d-48bb-9e31-c821067b5e04\") " Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.687333 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/041339eb-ee1d-48bb-9e31-c821067b5e04-config-data\") pod \"041339eb-ee1d-48bb-9e31-c821067b5e04\" (UID: \"041339eb-ee1d-48bb-9e31-c821067b5e04\") " Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.687379 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gjfgq\" (UniqueName: \"kubernetes.io/projected/041339eb-ee1d-48bb-9e31-c821067b5e04-kube-api-access-gjfgq\") pod \"041339eb-ee1d-48bb-9e31-c821067b5e04\" (UID: \"041339eb-ee1d-48bb-9e31-c821067b5e04\") " Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.687460 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/041339eb-ee1d-48bb-9e31-c821067b5e04-combined-ca-bundle\") pod \"041339eb-ee1d-48bb-9e31-c821067b5e04\" (UID: \"041339eb-ee1d-48bb-9e31-c821067b5e04\") " Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.687981 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/041339eb-ee1d-48bb-9e31-c821067b5e04-logs" (OuterVolumeSpecName: "logs") pod "041339eb-ee1d-48bb-9e31-c821067b5e04" (UID: "041339eb-ee1d-48bb-9e31-c821067b5e04"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.692122 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/041339eb-ee1d-48bb-9e31-c821067b5e04-kube-api-access-gjfgq" (OuterVolumeSpecName: "kube-api-access-gjfgq") pod "041339eb-ee1d-48bb-9e31-c821067b5e04" (UID: "041339eb-ee1d-48bb-9e31-c821067b5e04"). InnerVolumeSpecName "kube-api-access-gjfgq". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.716449 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/041339eb-ee1d-48bb-9e31-c821067b5e04-config-data" (OuterVolumeSpecName: "config-data") pod "041339eb-ee1d-48bb-9e31-c821067b5e04" (UID: "041339eb-ee1d-48bb-9e31-c821067b5e04"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.745070 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/041339eb-ee1d-48bb-9e31-c821067b5e04-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "041339eb-ee1d-48bb-9e31-c821067b5e04" (UID: "041339eb-ee1d-48bb-9e31-c821067b5e04"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.751898 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/041339eb-ee1d-48bb-9e31-c821067b5e04-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "041339eb-ee1d-48bb-9e31-c821067b5e04" (UID: "041339eb-ee1d-48bb-9e31-c821067b5e04"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.754302 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/041339eb-ee1d-48bb-9e31-c821067b5e04-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "041339eb-ee1d-48bb-9e31-c821067b5e04" (UID: "041339eb-ee1d-48bb-9e31-c821067b5e04"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.789391 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/041339eb-ee1d-48bb-9e31-c821067b5e04-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.789587 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gjfgq\" (UniqueName: \"kubernetes.io/projected/041339eb-ee1d-48bb-9e31-c821067b5e04-kube-api-access-gjfgq\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.789648 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/041339eb-ee1d-48bb-9e31-c821067b5e04-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.789701 4791 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/041339eb-ee1d-48bb-9e31-c821067b5e04-logs\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.789758 4791 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/041339eb-ee1d-48bb-9e31-c821067b5e04-public-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.789812 4791 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/041339eb-ee1d-48bb-9e31-c821067b5e04-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.891764 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.908426 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.927925 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.958135 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.977075 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Feb 18 00:59:54 crc kubenswrapper[4791]: E0218 00:59:54.977613 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="006d44aa-45d0-4ebb-9b58-18246020566e" containerName="nova-metadata-metadata" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.977638 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="006d44aa-45d0-4ebb-9b58-18246020566e" containerName="nova-metadata-metadata" Feb 18 00:59:54 crc kubenswrapper[4791]: E0218 00:59:54.977660 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78f72e20-5be7-448d-b18c-390ad193f0ea" containerName="dnsmasq-dns" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.977666 4791 
state_mem.go:107] "Deleted CPUSet assignment" podUID="78f72e20-5be7-448d-b18c-390ad193f0ea" containerName="dnsmasq-dns" Feb 18 00:59:54 crc kubenswrapper[4791]: E0218 00:59:54.977693 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="041339eb-ee1d-48bb-9e31-c821067b5e04" containerName="nova-api-log" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.977702 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="041339eb-ee1d-48bb-9e31-c821067b5e04" containerName="nova-api-log" Feb 18 00:59:54 crc kubenswrapper[4791]: E0218 00:59:54.977718 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="006d44aa-45d0-4ebb-9b58-18246020566e" containerName="nova-metadata-log" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.977728 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="006d44aa-45d0-4ebb-9b58-18246020566e" containerName="nova-metadata-log" Feb 18 00:59:54 crc kubenswrapper[4791]: E0218 00:59:54.977742 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11c5a6bf-395f-4b92-a6a3-99176c573b6b" containerName="nova-scheduler-scheduler" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.977748 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="11c5a6bf-395f-4b92-a6a3-99176c573b6b" containerName="nova-scheduler-scheduler" Feb 18 00:59:54 crc kubenswrapper[4791]: E0218 00:59:54.977757 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78f72e20-5be7-448d-b18c-390ad193f0ea" containerName="init" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.977763 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="78f72e20-5be7-448d-b18c-390ad193f0ea" containerName="init" Feb 18 00:59:54 crc kubenswrapper[4791]: E0218 00:59:54.977773 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="041339eb-ee1d-48bb-9e31-c821067b5e04" containerName="nova-api-api" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.977780 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="041339eb-ee1d-48bb-9e31-c821067b5e04" containerName="nova-api-api" Feb 18 00:59:54 crc kubenswrapper[4791]: E0218 00:59:54.977810 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2b1a453-7c2e-47bd-8c65-c0d6e1939741" containerName="nova-manage" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.977823 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2b1a453-7c2e-47bd-8c65-c0d6e1939741" containerName="nova-manage" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.978085 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="041339eb-ee1d-48bb-9e31-c821067b5e04" containerName="nova-api-log" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.978118 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="006d44aa-45d0-4ebb-9b58-18246020566e" containerName="nova-metadata-log" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.978142 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="041339eb-ee1d-48bb-9e31-c821067b5e04" containerName="nova-api-api" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.978243 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="11c5a6bf-395f-4b92-a6a3-99176c573b6b" containerName="nova-scheduler-scheduler" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.978266 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2b1a453-7c2e-47bd-8c65-c0d6e1939741" containerName="nova-manage" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 
00:59:54.978281 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="78f72e20-5be7-448d-b18c-390ad193f0ea" containerName="dnsmasq-dns" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.978304 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="006d44aa-45d0-4ebb-9b58-18246020566e" containerName="nova-metadata-metadata" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.979920 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.982039 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.982906 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Feb 18 00:59:54 crc kubenswrapper[4791]: I0218 00:59:54.999366 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.009100 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.010834 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.012883 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.022273 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.075451 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="006d44aa-45d0-4ebb-9b58-18246020566e" path="/var/lib/kubelet/pods/006d44aa-45d0-4ebb-9b58-18246020566e/volumes" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.076258 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="11c5a6bf-395f-4b92-a6a3-99176c573b6b" path="/var/lib/kubelet/pods/11c5a6bf-395f-4b92-a6a3-99176c573b6b/volumes" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.105249 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17b7a426-4dd0-4349-999d-1f323e337be0-config-data\") pod \"nova-metadata-0\" (UID: \"17b7a426-4dd0-4349-999d-1f323e337be0\") " pod="openstack/nova-metadata-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.105302 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/17b7a426-4dd0-4349-999d-1f323e337be0-logs\") pod \"nova-metadata-0\" (UID: \"17b7a426-4dd0-4349-999d-1f323e337be0\") " pod="openstack/nova-metadata-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.105408 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/17b7a426-4dd0-4349-999d-1f323e337be0-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"17b7a426-4dd0-4349-999d-1f323e337be0\") " pod="openstack/nova-metadata-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.105615 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/17b7a426-4dd0-4349-999d-1f323e337be0-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"17b7a426-4dd0-4349-999d-1f323e337be0\") " pod="openstack/nova-metadata-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.105672 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kk9z4\" (UniqueName: \"kubernetes.io/projected/17b7a426-4dd0-4349-999d-1f323e337be0-kube-api-access-kk9z4\") pod \"nova-metadata-0\" (UID: \"17b7a426-4dd0-4349-999d-1f323e337be0\") " pod="openstack/nova-metadata-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.207451 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17b7a426-4dd0-4349-999d-1f323e337be0-config-data\") pod \"nova-metadata-0\" (UID: \"17b7a426-4dd0-4349-999d-1f323e337be0\") " pod="openstack/nova-metadata-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.208177 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/17b7a426-4dd0-4349-999d-1f323e337be0-logs\") pod \"nova-metadata-0\" (UID: \"17b7a426-4dd0-4349-999d-1f323e337be0\") " pod="openstack/nova-metadata-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.208585 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/17b7a426-4dd0-4349-999d-1f323e337be0-logs\") pod \"nova-metadata-0\" (UID: \"17b7a426-4dd0-4349-999d-1f323e337be0\") " pod="openstack/nova-metadata-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.209210 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/17b7a426-4dd0-4349-999d-1f323e337be0-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"17b7a426-4dd0-4349-999d-1f323e337be0\") " pod="openstack/nova-metadata-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.209781 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f5fd517-5092-4db3-9a3f-04fd12f9829a-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"8f5fd517-5092-4db3-9a3f-04fd12f9829a\") " pod="openstack/nova-scheduler-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.209850 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2p2p\" (UniqueName: \"kubernetes.io/projected/8f5fd517-5092-4db3-9a3f-04fd12f9829a-kube-api-access-t2p2p\") pod \"nova-scheduler-0\" (UID: \"8f5fd517-5092-4db3-9a3f-04fd12f9829a\") " pod="openstack/nova-scheduler-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.209921 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f5fd517-5092-4db3-9a3f-04fd12f9829a-config-data\") pod \"nova-scheduler-0\" (UID: \"8f5fd517-5092-4db3-9a3f-04fd12f9829a\") " pod="openstack/nova-scheduler-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.210018 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17b7a426-4dd0-4349-999d-1f323e337be0-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"17b7a426-4dd0-4349-999d-1f323e337be0\") " pod="openstack/nova-metadata-0" Feb 18 00:59:55 crc 
kubenswrapper[4791]: I0218 00:59:55.210099 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kk9z4\" (UniqueName: \"kubernetes.io/projected/17b7a426-4dd0-4349-999d-1f323e337be0-kube-api-access-kk9z4\") pod \"nova-metadata-0\" (UID: \"17b7a426-4dd0-4349-999d-1f323e337be0\") " pod="openstack/nova-metadata-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.212026 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17b7a426-4dd0-4349-999d-1f323e337be0-config-data\") pod \"nova-metadata-0\" (UID: \"17b7a426-4dd0-4349-999d-1f323e337be0\") " pod="openstack/nova-metadata-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.212573 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/17b7a426-4dd0-4349-999d-1f323e337be0-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"17b7a426-4dd0-4349-999d-1f323e337be0\") " pod="openstack/nova-metadata-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.215016 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17b7a426-4dd0-4349-999d-1f323e337be0-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"17b7a426-4dd0-4349-999d-1f323e337be0\") " pod="openstack/nova-metadata-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.228106 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kk9z4\" (UniqueName: \"kubernetes.io/projected/17b7a426-4dd0-4349-999d-1f323e337be0-kube-api-access-kk9z4\") pod \"nova-metadata-0\" (UID: \"17b7a426-4dd0-4349-999d-1f323e337be0\") " pod="openstack/nova-metadata-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.311971 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f5fd517-5092-4db3-9a3f-04fd12f9829a-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"8f5fd517-5092-4db3-9a3f-04fd12f9829a\") " pod="openstack/nova-scheduler-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.312251 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.312373 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2p2p\" (UniqueName: \"kubernetes.io/projected/8f5fd517-5092-4db3-9a3f-04fd12f9829a-kube-api-access-t2p2p\") pod \"nova-scheduler-0\" (UID: \"8f5fd517-5092-4db3-9a3f-04fd12f9829a\") " pod="openstack/nova-scheduler-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.312488 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f5fd517-5092-4db3-9a3f-04fd12f9829a-config-data\") pod \"nova-scheduler-0\" (UID: \"8f5fd517-5092-4db3-9a3f-04fd12f9829a\") " pod="openstack/nova-scheduler-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.316044 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f5fd517-5092-4db3-9a3f-04fd12f9829a-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"8f5fd517-5092-4db3-9a3f-04fd12f9829a\") " pod="openstack/nova-scheduler-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.318887 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f5fd517-5092-4db3-9a3f-04fd12f9829a-config-data\") pod \"nova-scheduler-0\" (UID: \"8f5fd517-5092-4db3-9a3f-04fd12f9829a\") " pod="openstack/nova-scheduler-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.328055 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2p2p\" (UniqueName: \"kubernetes.io/projected/8f5fd517-5092-4db3-9a3f-04fd12f9829a-kube-api-access-t2p2p\") pod \"nova-scheduler-0\" (UID: \"8f5fd517-5092-4db3-9a3f-04fd12f9829a\") " pod="openstack/nova-scheduler-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.337874 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.501032 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"041339eb-ee1d-48bb-9e31-c821067b5e04","Type":"ContainerDied","Data":"beab38b6d7e2ea168882983d74c3488d0557c6d091316224275d5357b13c5af0"} Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.501085 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.501337 4791 scope.go:117] "RemoveContainer" containerID="006c9f4c55e116d52b069016a052d9f9ad4c2609d35b2373212532926de7992e" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.542292 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.564749 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.588328 4791 scope.go:117] "RemoveContainer" containerID="efc4eb2a2d94360d2be7a3ced97c6671bf8d9dca98c611bb6ff954c7a5b5ec75" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.592667 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.594957 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.597506 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.598929 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.599064 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.604684 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.623605 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c00433e0-9dce-47aa-a1b5-f4876de61b2e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c00433e0-9dce-47aa-a1b5-f4876de61b2e\") " pod="openstack/nova-api-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.623904 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c00433e0-9dce-47aa-a1b5-f4876de61b2e-internal-tls-certs\") pod \"nova-api-0\" (UID: \"c00433e0-9dce-47aa-a1b5-f4876de61b2e\") " pod="openstack/nova-api-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.623970 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hqbhv\" (UniqueName: \"kubernetes.io/projected/c00433e0-9dce-47aa-a1b5-f4876de61b2e-kube-api-access-hqbhv\") pod \"nova-api-0\" (UID: \"c00433e0-9dce-47aa-a1b5-f4876de61b2e\") " pod="openstack/nova-api-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.624097 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c00433e0-9dce-47aa-a1b5-f4876de61b2e-public-tls-certs\") pod \"nova-api-0\" (UID: \"c00433e0-9dce-47aa-a1b5-f4876de61b2e\") " pod="openstack/nova-api-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.624316 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c00433e0-9dce-47aa-a1b5-f4876de61b2e-logs\") pod \"nova-api-0\" (UID: \"c00433e0-9dce-47aa-a1b5-f4876de61b2e\") " pod="openstack/nova-api-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.624386 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c00433e0-9dce-47aa-a1b5-f4876de61b2e-config-data\") pod \"nova-api-0\" (UID: \"c00433e0-9dce-47aa-a1b5-f4876de61b2e\") " pod="openstack/nova-api-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.726687 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c00433e0-9dce-47aa-a1b5-f4876de61b2e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c00433e0-9dce-47aa-a1b5-f4876de61b2e\") " pod="openstack/nova-api-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.726747 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c00433e0-9dce-47aa-a1b5-f4876de61b2e-internal-tls-certs\") pod 
\"nova-api-0\" (UID: \"c00433e0-9dce-47aa-a1b5-f4876de61b2e\") " pod="openstack/nova-api-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.726786 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hqbhv\" (UniqueName: \"kubernetes.io/projected/c00433e0-9dce-47aa-a1b5-f4876de61b2e-kube-api-access-hqbhv\") pod \"nova-api-0\" (UID: \"c00433e0-9dce-47aa-a1b5-f4876de61b2e\") " pod="openstack/nova-api-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.726838 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c00433e0-9dce-47aa-a1b5-f4876de61b2e-public-tls-certs\") pod \"nova-api-0\" (UID: \"c00433e0-9dce-47aa-a1b5-f4876de61b2e\") " pod="openstack/nova-api-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.726976 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c00433e0-9dce-47aa-a1b5-f4876de61b2e-logs\") pod \"nova-api-0\" (UID: \"c00433e0-9dce-47aa-a1b5-f4876de61b2e\") " pod="openstack/nova-api-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.727084 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c00433e0-9dce-47aa-a1b5-f4876de61b2e-config-data\") pod \"nova-api-0\" (UID: \"c00433e0-9dce-47aa-a1b5-f4876de61b2e\") " pod="openstack/nova-api-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.727594 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c00433e0-9dce-47aa-a1b5-f4876de61b2e-logs\") pod \"nova-api-0\" (UID: \"c00433e0-9dce-47aa-a1b5-f4876de61b2e\") " pod="openstack/nova-api-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.731777 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c00433e0-9dce-47aa-a1b5-f4876de61b2e-public-tls-certs\") pod \"nova-api-0\" (UID: \"c00433e0-9dce-47aa-a1b5-f4876de61b2e\") " pod="openstack/nova-api-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.732783 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c00433e0-9dce-47aa-a1b5-f4876de61b2e-internal-tls-certs\") pod \"nova-api-0\" (UID: \"c00433e0-9dce-47aa-a1b5-f4876de61b2e\") " pod="openstack/nova-api-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.733604 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c00433e0-9dce-47aa-a1b5-f4876de61b2e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c00433e0-9dce-47aa-a1b5-f4876de61b2e\") " pod="openstack/nova-api-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.734525 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c00433e0-9dce-47aa-a1b5-f4876de61b2e-config-data\") pod \"nova-api-0\" (UID: \"c00433e0-9dce-47aa-a1b5-f4876de61b2e\") " pod="openstack/nova-api-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.745094 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hqbhv\" (UniqueName: \"kubernetes.io/projected/c00433e0-9dce-47aa-a1b5-f4876de61b2e-kube-api-access-hqbhv\") pod \"nova-api-0\" (UID: \"c00433e0-9dce-47aa-a1b5-f4876de61b2e\") " 
pod="openstack/nova-api-0" Feb 18 00:59:55 crc kubenswrapper[4791]: W0218 00:59:55.849239 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod17b7a426_4dd0_4349_999d_1f323e337be0.slice/crio-bec11aeb946576a309e01db109075546518e3f50ff9e1339d565d4a2fe2058de WatchSource:0}: Error finding container bec11aeb946576a309e01db109075546518e3f50ff9e1339d565d4a2fe2058de: Status 404 returned error can't find the container with id bec11aeb946576a309e01db109075546518e3f50ff9e1339d565d4a2fe2058de Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.853477 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.945457 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Feb 18 00:59:55 crc kubenswrapper[4791]: I0218 00:59:55.977933 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Feb 18 00:59:55 crc kubenswrapper[4791]: W0218 00:59:55.986770 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8f5fd517_5092_4db3_9a3f_04fd12f9829a.slice/crio-b54687ea28f162b1fc45cc09bf7a356fdbcfb57703d80a16f54703779c320b46 WatchSource:0}: Error finding container b54687ea28f162b1fc45cc09bf7a356fdbcfb57703d80a16f54703779c320b46: Status 404 returned error can't find the container with id b54687ea28f162b1fc45cc09bf7a356fdbcfb57703d80a16f54703779c320b46 Feb 18 00:59:56 crc kubenswrapper[4791]: I0218 00:59:56.470303 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Feb 18 00:59:56 crc kubenswrapper[4791]: W0218 00:59:56.470448 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc00433e0_9dce_47aa_a1b5_f4876de61b2e.slice/crio-3eb9ba9afba25238371de05f484d6d361ddabd966cafadc17ca012c700082da1 WatchSource:0}: Error finding container 3eb9ba9afba25238371de05f484d6d361ddabd966cafadc17ca012c700082da1: Status 404 returned error can't find the container with id 3eb9ba9afba25238371de05f484d6d361ddabd966cafadc17ca012c700082da1 Feb 18 00:59:56 crc kubenswrapper[4791]: I0218 00:59:56.513888 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c00433e0-9dce-47aa-a1b5-f4876de61b2e","Type":"ContainerStarted","Data":"3eb9ba9afba25238371de05f484d6d361ddabd966cafadc17ca012c700082da1"} Feb 18 00:59:56 crc kubenswrapper[4791]: I0218 00:59:56.523645 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"17b7a426-4dd0-4349-999d-1f323e337be0","Type":"ContainerStarted","Data":"d2a646091397187e55a8635110e409808c00537c32661bcfeb0a4f8798ee933b"} Feb 18 00:59:56 crc kubenswrapper[4791]: I0218 00:59:56.523704 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"17b7a426-4dd0-4349-999d-1f323e337be0","Type":"ContainerStarted","Data":"9417e43499b8367f8a0f68d5b83832653d7b2bc0635711e8d45ab1d6203fe3f3"} Feb 18 00:59:56 crc kubenswrapper[4791]: I0218 00:59:56.523730 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"17b7a426-4dd0-4349-999d-1f323e337be0","Type":"ContainerStarted","Data":"bec11aeb946576a309e01db109075546518e3f50ff9e1339d565d4a2fe2058de"} Feb 18 00:59:56 crc kubenswrapper[4791]: I0218 00:59:56.539281 4791 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"8f5fd517-5092-4db3-9a3f-04fd12f9829a","Type":"ContainerStarted","Data":"09e130416130d2b66fe5a855be6df647d215f1f35c78e7915ee80a1673bcda2c"} Feb 18 00:59:56 crc kubenswrapper[4791]: I0218 00:59:56.539324 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"8f5fd517-5092-4db3-9a3f-04fd12f9829a","Type":"ContainerStarted","Data":"b54687ea28f162b1fc45cc09bf7a356fdbcfb57703d80a16f54703779c320b46"} Feb 18 00:59:56 crc kubenswrapper[4791]: I0218 00:59:56.562738 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.56271951 podStartE2EDuration="2.56271951s" podCreationTimestamp="2026-02-18 00:59:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:59:56.55816694 +0000 UTC m=+1538.126180110" watchObservedRunningTime="2026-02-18 00:59:56.56271951 +0000 UTC m=+1538.130732680" Feb 18 00:59:56 crc kubenswrapper[4791]: I0218 00:59:56.583515 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.583496263 podStartE2EDuration="2.583496263s" podCreationTimestamp="2026-02-18 00:59:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:59:56.57208924 +0000 UTC m=+1538.140102410" watchObservedRunningTime="2026-02-18 00:59:56.583496263 +0000 UTC m=+1538.151509433" Feb 18 00:59:57 crc kubenswrapper[4791]: I0218 00:59:57.093788 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="041339eb-ee1d-48bb-9e31-c821067b5e04" path="/var/lib/kubelet/pods/041339eb-ee1d-48bb-9e31-c821067b5e04/volumes" Feb 18 00:59:57 crc kubenswrapper[4791]: I0218 00:59:57.552960 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c00433e0-9dce-47aa-a1b5-f4876de61b2e","Type":"ContainerStarted","Data":"ca756021070ffc17a3ca01475cdf2b4cf955ce4de17e191763c56aacd73cb7ad"} Feb 18 00:59:57 crc kubenswrapper[4791]: I0218 00:59:57.553019 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c00433e0-9dce-47aa-a1b5-f4876de61b2e","Type":"ContainerStarted","Data":"9b6225b25529271998d400e43b471b3229e694445459ff2b63dc4fd736dc9807"} Feb 18 00:59:57 crc kubenswrapper[4791]: I0218 00:59:57.602849 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.60283245 podStartE2EDuration="2.60283245s" podCreationTimestamp="2026-02-18 00:59:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 00:59:57.599479546 +0000 UTC m=+1539.167492716" watchObservedRunningTime="2026-02-18 00:59:57.60283245 +0000 UTC m=+1539.170845620" Feb 18 01:00:00 crc kubenswrapper[4791]: I0218 01:00:00.172717 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29522940-p48tt"] Feb 18 01:00:00 crc kubenswrapper[4791]: I0218 01:00:00.175443 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29522940-p48tt" Feb 18 01:00:00 crc kubenswrapper[4791]: I0218 01:00:00.178540 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Feb 18 01:00:00 crc kubenswrapper[4791]: I0218 01:00:00.180558 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Feb 18 01:00:00 crc kubenswrapper[4791]: I0218 01:00:00.224382 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29522940-p48tt"] Feb 18 01:00:00 crc kubenswrapper[4791]: I0218 01:00:00.247814 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/31dc1b9b-9b55-465e-ac36-794af7e2e0bd-config-volume\") pod \"collect-profiles-29522940-p48tt\" (UID: \"31dc1b9b-9b55-465e-ac36-794af7e2e0bd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522940-p48tt" Feb 18 01:00:00 crc kubenswrapper[4791]: I0218 01:00:00.247953 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xm9f2\" (UniqueName: \"kubernetes.io/projected/31dc1b9b-9b55-465e-ac36-794af7e2e0bd-kube-api-access-xm9f2\") pod \"collect-profiles-29522940-p48tt\" (UID: \"31dc1b9b-9b55-465e-ac36-794af7e2e0bd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522940-p48tt" Feb 18 01:00:00 crc kubenswrapper[4791]: I0218 01:00:00.248009 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/31dc1b9b-9b55-465e-ac36-794af7e2e0bd-secret-volume\") pod \"collect-profiles-29522940-p48tt\" (UID: \"31dc1b9b-9b55-465e-ac36-794af7e2e0bd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522940-p48tt" Feb 18 01:00:00 crc kubenswrapper[4791]: I0218 01:00:00.312930 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Feb 18 01:00:00 crc kubenswrapper[4791]: I0218 01:00:00.312983 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Feb 18 01:00:00 crc kubenswrapper[4791]: I0218 01:00:00.338426 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Feb 18 01:00:00 crc kubenswrapper[4791]: I0218 01:00:00.350624 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/31dc1b9b-9b55-465e-ac36-794af7e2e0bd-config-volume\") pod \"collect-profiles-29522940-p48tt\" (UID: \"31dc1b9b-9b55-465e-ac36-794af7e2e0bd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522940-p48tt" Feb 18 01:00:00 crc kubenswrapper[4791]: I0218 01:00:00.350725 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xm9f2\" (UniqueName: \"kubernetes.io/projected/31dc1b9b-9b55-465e-ac36-794af7e2e0bd-kube-api-access-xm9f2\") pod \"collect-profiles-29522940-p48tt\" (UID: \"31dc1b9b-9b55-465e-ac36-794af7e2e0bd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522940-p48tt" Feb 18 01:00:00 crc kubenswrapper[4791]: I0218 01:00:00.350781 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: 
\"kubernetes.io/secret/31dc1b9b-9b55-465e-ac36-794af7e2e0bd-secret-volume\") pod \"collect-profiles-29522940-p48tt\" (UID: \"31dc1b9b-9b55-465e-ac36-794af7e2e0bd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522940-p48tt" Feb 18 01:00:00 crc kubenswrapper[4791]: I0218 01:00:00.353005 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/31dc1b9b-9b55-465e-ac36-794af7e2e0bd-config-volume\") pod \"collect-profiles-29522940-p48tt\" (UID: \"31dc1b9b-9b55-465e-ac36-794af7e2e0bd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522940-p48tt" Feb 18 01:00:00 crc kubenswrapper[4791]: I0218 01:00:00.359272 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/31dc1b9b-9b55-465e-ac36-794af7e2e0bd-secret-volume\") pod \"collect-profiles-29522940-p48tt\" (UID: \"31dc1b9b-9b55-465e-ac36-794af7e2e0bd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522940-p48tt" Feb 18 01:00:00 crc kubenswrapper[4791]: I0218 01:00:00.369119 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xm9f2\" (UniqueName: \"kubernetes.io/projected/31dc1b9b-9b55-465e-ac36-794af7e2e0bd-kube-api-access-xm9f2\") pod \"collect-profiles-29522940-p48tt\" (UID: \"31dc1b9b-9b55-465e-ac36-794af7e2e0bd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522940-p48tt" Feb 18 01:00:00 crc kubenswrapper[4791]: I0218 01:00:00.507631 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29522940-p48tt" Feb 18 01:00:00 crc kubenswrapper[4791]: I0218 01:00:00.838549 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-c82zr"] Feb 18 01:00:00 crc kubenswrapper[4791]: I0218 01:00:00.841356 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-c82zr" Feb 18 01:00:00 crc kubenswrapper[4791]: I0218 01:00:00.855261 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-c82zr"] Feb 18 01:00:00 crc kubenswrapper[4791]: I0218 01:00:00.868449 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-772gx\" (UniqueName: \"kubernetes.io/projected/348a7b7d-959f-4e3e-b40f-d39facc48df0-kube-api-access-772gx\") pod \"redhat-marketplace-c82zr\" (UID: \"348a7b7d-959f-4e3e-b40f-d39facc48df0\") " pod="openshift-marketplace/redhat-marketplace-c82zr" Feb 18 01:00:00 crc kubenswrapper[4791]: I0218 01:00:00.868586 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/348a7b7d-959f-4e3e-b40f-d39facc48df0-utilities\") pod \"redhat-marketplace-c82zr\" (UID: \"348a7b7d-959f-4e3e-b40f-d39facc48df0\") " pod="openshift-marketplace/redhat-marketplace-c82zr" Feb 18 01:00:00 crc kubenswrapper[4791]: I0218 01:00:00.868667 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/348a7b7d-959f-4e3e-b40f-d39facc48df0-catalog-content\") pod \"redhat-marketplace-c82zr\" (UID: \"348a7b7d-959f-4e3e-b40f-d39facc48df0\") " pod="openshift-marketplace/redhat-marketplace-c82zr" Feb 18 01:00:00 crc kubenswrapper[4791]: I0218 01:00:00.964923 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29522940-p48tt"] Feb 18 01:00:00 crc kubenswrapper[4791]: W0218 01:00:00.968855 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod31dc1b9b_9b55_465e_ac36_794af7e2e0bd.slice/crio-e63215a68a49db0e451cd9c59e1660be0b75b3c4e23859fea06a0e13d5ecb82f WatchSource:0}: Error finding container e63215a68a49db0e451cd9c59e1660be0b75b3c4e23859fea06a0e13d5ecb82f: Status 404 returned error can't find the container with id e63215a68a49db0e451cd9c59e1660be0b75b3c4e23859fea06a0e13d5ecb82f Feb 18 01:00:00 crc kubenswrapper[4791]: I0218 01:00:00.970381 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/348a7b7d-959f-4e3e-b40f-d39facc48df0-catalog-content\") pod \"redhat-marketplace-c82zr\" (UID: \"348a7b7d-959f-4e3e-b40f-d39facc48df0\") " pod="openshift-marketplace/redhat-marketplace-c82zr" Feb 18 01:00:00 crc kubenswrapper[4791]: I0218 01:00:00.970467 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-772gx\" (UniqueName: \"kubernetes.io/projected/348a7b7d-959f-4e3e-b40f-d39facc48df0-kube-api-access-772gx\") pod \"redhat-marketplace-c82zr\" (UID: \"348a7b7d-959f-4e3e-b40f-d39facc48df0\") " pod="openshift-marketplace/redhat-marketplace-c82zr" Feb 18 01:00:00 crc kubenswrapper[4791]: I0218 01:00:00.970618 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/348a7b7d-959f-4e3e-b40f-d39facc48df0-utilities\") pod \"redhat-marketplace-c82zr\" (UID: \"348a7b7d-959f-4e3e-b40f-d39facc48df0\") " pod="openshift-marketplace/redhat-marketplace-c82zr" Feb 18 01:00:00 crc kubenswrapper[4791]: I0218 01:00:00.971196 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/348a7b7d-959f-4e3e-b40f-d39facc48df0-catalog-content\") pod \"redhat-marketplace-c82zr\" (UID: \"348a7b7d-959f-4e3e-b40f-d39facc48df0\") " pod="openshift-marketplace/redhat-marketplace-c82zr" Feb 18 01:00:00 crc kubenswrapper[4791]: I0218 01:00:00.971263 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/348a7b7d-959f-4e3e-b40f-d39facc48df0-utilities\") pod \"redhat-marketplace-c82zr\" (UID: \"348a7b7d-959f-4e3e-b40f-d39facc48df0\") " pod="openshift-marketplace/redhat-marketplace-c82zr" Feb 18 01:00:00 crc kubenswrapper[4791]: I0218 01:00:00.999409 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-772gx\" (UniqueName: \"kubernetes.io/projected/348a7b7d-959f-4e3e-b40f-d39facc48df0-kube-api-access-772gx\") pod \"redhat-marketplace-c82zr\" (UID: \"348a7b7d-959f-4e3e-b40f-d39facc48df0\") " pod="openshift-marketplace/redhat-marketplace-c82zr" Feb 18 01:00:01 crc kubenswrapper[4791]: I0218 01:00:01.171278 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-c82zr" Feb 18 01:00:01 crc kubenswrapper[4791]: I0218 01:00:01.604540 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29522940-p48tt" event={"ID":"31dc1b9b-9b55-465e-ac36-794af7e2e0bd","Type":"ContainerStarted","Data":"ca408d420d1fa739f58904fd48426a5143f4c5378c7160bbba28c72cfe32fe57"} Feb 18 01:00:01 crc kubenswrapper[4791]: I0218 01:00:01.604806 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29522940-p48tt" event={"ID":"31dc1b9b-9b55-465e-ac36-794af7e2e0bd","Type":"ContainerStarted","Data":"e63215a68a49db0e451cd9c59e1660be0b75b3c4e23859fea06a0e13d5ecb82f"} Feb 18 01:00:01 crc kubenswrapper[4791]: I0218 01:00:01.631849 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29522940-p48tt" podStartSLOduration=1.6318160339999999 podStartE2EDuration="1.631816034s" podCreationTimestamp="2026-02-18 01:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 01:00:01.615763567 +0000 UTC m=+1543.183776737" watchObservedRunningTime="2026-02-18 01:00:01.631816034 +0000 UTC m=+1543.199829464" Feb 18 01:00:01 crc kubenswrapper[4791]: I0218 01:00:01.650904 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-c82zr"] Feb 18 01:00:02 crc kubenswrapper[4791]: I0218 01:00:02.621135 4791 generic.go:334] "Generic (PLEG): container finished" podID="31dc1b9b-9b55-465e-ac36-794af7e2e0bd" containerID="ca408d420d1fa739f58904fd48426a5143f4c5378c7160bbba28c72cfe32fe57" exitCode=0 Feb 18 01:00:02 crc kubenswrapper[4791]: I0218 01:00:02.621540 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29522940-p48tt" event={"ID":"31dc1b9b-9b55-465e-ac36-794af7e2e0bd","Type":"ContainerDied","Data":"ca408d420d1fa739f58904fd48426a5143f4c5378c7160bbba28c72cfe32fe57"} Feb 18 01:00:02 crc kubenswrapper[4791]: I0218 01:00:02.623549 4791 generic.go:334] "Generic (PLEG): container finished" podID="348a7b7d-959f-4e3e-b40f-d39facc48df0" containerID="879e7ab83fde88328d4d27ced33732acf56efa4db37d8968c680597c63fb9475" exitCode=0 Feb 18 
01:00:02 crc kubenswrapper[4791]: I0218 01:00:02.623583 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c82zr" event={"ID":"348a7b7d-959f-4e3e-b40f-d39facc48df0","Type":"ContainerDied","Data":"879e7ab83fde88328d4d27ced33732acf56efa4db37d8968c680597c63fb9475"} Feb 18 01:00:02 crc kubenswrapper[4791]: I0218 01:00:02.623599 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c82zr" event={"ID":"348a7b7d-959f-4e3e-b40f-d39facc48df0","Type":"ContainerStarted","Data":"7d07d291dccd195dc0de230caa77eee2b900059f51b1cd8b0a1031ddbcc57b07"} Feb 18 01:00:04 crc kubenswrapper[4791]: I0218 01:00:04.079240 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29522940-p48tt" Feb 18 01:00:04 crc kubenswrapper[4791]: I0218 01:00:04.158646 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xm9f2\" (UniqueName: \"kubernetes.io/projected/31dc1b9b-9b55-465e-ac36-794af7e2e0bd-kube-api-access-xm9f2\") pod \"31dc1b9b-9b55-465e-ac36-794af7e2e0bd\" (UID: \"31dc1b9b-9b55-465e-ac36-794af7e2e0bd\") " Feb 18 01:00:04 crc kubenswrapper[4791]: I0218 01:00:04.158876 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/31dc1b9b-9b55-465e-ac36-794af7e2e0bd-config-volume\") pod \"31dc1b9b-9b55-465e-ac36-794af7e2e0bd\" (UID: \"31dc1b9b-9b55-465e-ac36-794af7e2e0bd\") " Feb 18 01:00:04 crc kubenswrapper[4791]: I0218 01:00:04.159036 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/31dc1b9b-9b55-465e-ac36-794af7e2e0bd-secret-volume\") pod \"31dc1b9b-9b55-465e-ac36-794af7e2e0bd\" (UID: \"31dc1b9b-9b55-465e-ac36-794af7e2e0bd\") " Feb 18 01:00:04 crc kubenswrapper[4791]: I0218 01:00:04.159583 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31dc1b9b-9b55-465e-ac36-794af7e2e0bd-config-volume" (OuterVolumeSpecName: "config-volume") pod "31dc1b9b-9b55-465e-ac36-794af7e2e0bd" (UID: "31dc1b9b-9b55-465e-ac36-794af7e2e0bd"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 01:00:04 crc kubenswrapper[4791]: I0218 01:00:04.160056 4791 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/31dc1b9b-9b55-465e-ac36-794af7e2e0bd-config-volume\") on node \"crc\" DevicePath \"\"" Feb 18 01:00:04 crc kubenswrapper[4791]: I0218 01:00:04.176400 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31dc1b9b-9b55-465e-ac36-794af7e2e0bd-kube-api-access-xm9f2" (OuterVolumeSpecName: "kube-api-access-xm9f2") pod "31dc1b9b-9b55-465e-ac36-794af7e2e0bd" (UID: "31dc1b9b-9b55-465e-ac36-794af7e2e0bd"). InnerVolumeSpecName "kube-api-access-xm9f2". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:00:04 crc kubenswrapper[4791]: I0218 01:00:04.181763 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31dc1b9b-9b55-465e-ac36-794af7e2e0bd-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "31dc1b9b-9b55-465e-ac36-794af7e2e0bd" (UID: "31dc1b9b-9b55-465e-ac36-794af7e2e0bd"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:00:04 crc kubenswrapper[4791]: I0218 01:00:04.262773 4791 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/31dc1b9b-9b55-465e-ac36-794af7e2e0bd-secret-volume\") on node \"crc\" DevicePath \"\"" Feb 18 01:00:04 crc kubenswrapper[4791]: I0218 01:00:04.262808 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xm9f2\" (UniqueName: \"kubernetes.io/projected/31dc1b9b-9b55-465e-ac36-794af7e2e0bd-kube-api-access-xm9f2\") on node \"crc\" DevicePath \"\"" Feb 18 01:00:04 crc kubenswrapper[4791]: I0218 01:00:04.354532 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="7486cd5a-bf0d-4162-95ff-505011bcbda3" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Feb 18 01:00:04 crc kubenswrapper[4791]: I0218 01:00:04.651241 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29522940-p48tt" event={"ID":"31dc1b9b-9b55-465e-ac36-794af7e2e0bd","Type":"ContainerDied","Data":"e63215a68a49db0e451cd9c59e1660be0b75b3c4e23859fea06a0e13d5ecb82f"} Feb 18 01:00:04 crc kubenswrapper[4791]: I0218 01:00:04.651284 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e63215a68a49db0e451cd9c59e1660be0b75b3c4e23859fea06a0e13d5ecb82f" Feb 18 01:00:04 crc kubenswrapper[4791]: I0218 01:00:04.651282 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29522940-p48tt" Feb 18 01:00:05 crc kubenswrapper[4791]: I0218 01:00:05.312741 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Feb 18 01:00:05 crc kubenswrapper[4791]: I0218 01:00:05.312971 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Feb 18 01:00:05 crc kubenswrapper[4791]: I0218 01:00:05.338789 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Feb 18 01:00:05 crc kubenswrapper[4791]: I0218 01:00:05.368937 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Feb 18 01:00:05 crc kubenswrapper[4791]: I0218 01:00:05.670986 4791 generic.go:334] "Generic (PLEG): container finished" podID="153d9910-d8a5-43d7-8683-dab4c032949b" containerID="b3531dbc69ff57f8c6aea658e21db3f0d9b11411c11e5dea409f166daf22056b" exitCode=137 Feb 18 01:00:05 crc kubenswrapper[4791]: I0218 01:00:05.671324 4791 generic.go:334] "Generic (PLEG): container finished" podID="153d9910-d8a5-43d7-8683-dab4c032949b" containerID="5c178396279c61d27e41529cc52280e09a3234fba50b14c4a18f93432598052e" exitCode=137 Feb 18 01:00:05 crc kubenswrapper[4791]: I0218 01:00:05.671190 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"153d9910-d8a5-43d7-8683-dab4c032949b","Type":"ContainerDied","Data":"b3531dbc69ff57f8c6aea658e21db3f0d9b11411c11e5dea409f166daf22056b"} Feb 18 01:00:05 crc kubenswrapper[4791]: I0218 01:00:05.672544 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"153d9910-d8a5-43d7-8683-dab4c032949b","Type":"ContainerDied","Data":"5c178396279c61d27e41529cc52280e09a3234fba50b14c4a18f93432598052e"} Feb 18 01:00:05 crc kubenswrapper[4791]: I0218 01:00:05.704349 4791 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Feb 18 01:00:05 crc kubenswrapper[4791]: I0218 01:00:05.946206 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Feb 18 01:00:05 crc kubenswrapper[4791]: I0218 01:00:05.946253 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.005294 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.111374 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/153d9910-d8a5-43d7-8683-dab4c032949b-config-data\") pod \"153d9910-d8a5-43d7-8683-dab4c032949b\" (UID: \"153d9910-d8a5-43d7-8683-dab4c032949b\") " Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.111499 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/153d9910-d8a5-43d7-8683-dab4c032949b-combined-ca-bundle\") pod \"153d9910-d8a5-43d7-8683-dab4c032949b\" (UID: \"153d9910-d8a5-43d7-8683-dab4c032949b\") " Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.111636 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/153d9910-d8a5-43d7-8683-dab4c032949b-scripts\") pod \"153d9910-d8a5-43d7-8683-dab4c032949b\" (UID: \"153d9910-d8a5-43d7-8683-dab4c032949b\") " Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.111718 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-96b8p\" (UniqueName: \"kubernetes.io/projected/153d9910-d8a5-43d7-8683-dab4c032949b-kube-api-access-96b8p\") pod \"153d9910-d8a5-43d7-8683-dab4c032949b\" (UID: \"153d9910-d8a5-43d7-8683-dab4c032949b\") " Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.122425 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/153d9910-d8a5-43d7-8683-dab4c032949b-scripts" (OuterVolumeSpecName: "scripts") pod "153d9910-d8a5-43d7-8683-dab4c032949b" (UID: "153d9910-d8a5-43d7-8683-dab4c032949b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.123487 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/153d9910-d8a5-43d7-8683-dab4c032949b-kube-api-access-96b8p" (OuterVolumeSpecName: "kube-api-access-96b8p") pod "153d9910-d8a5-43d7-8683-dab4c032949b" (UID: "153d9910-d8a5-43d7-8683-dab4c032949b"). InnerVolumeSpecName "kube-api-access-96b8p". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.219531 4791 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/153d9910-d8a5-43d7-8683-dab4c032949b-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.219565 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-96b8p\" (UniqueName: \"kubernetes.io/projected/153d9910-d8a5-43d7-8683-dab4c032949b-kube-api-access-96b8p\") on node \"crc\" DevicePath \"\"" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.326272 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/153d9910-d8a5-43d7-8683-dab4c032949b-config-data" (OuterVolumeSpecName: "config-data") pod "153d9910-d8a5-43d7-8683-dab4c032949b" (UID: "153d9910-d8a5-43d7-8683-dab4c032949b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.331298 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="17b7a426-4dd0-4349-999d-1f323e337be0" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.1.8:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.331613 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="17b7a426-4dd0-4349-999d-1f323e337be0" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.1.8:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.363424 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/153d9910-d8a5-43d7-8683-dab4c032949b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "153d9910-d8a5-43d7-8683-dab4c032949b" (UID: "153d9910-d8a5-43d7-8683-dab4c032949b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.424279 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/153d9910-d8a5-43d7-8683-dab4c032949b-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.424316 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/153d9910-d8a5-43d7-8683-dab4c032949b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.685862 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"153d9910-d8a5-43d7-8683-dab4c032949b","Type":"ContainerDied","Data":"2930ca4a2e54c38da2baa9aaee9af34359d9b3f419bed2367e258ec728fdd9dd"} Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.685923 4791 scope.go:117] "RemoveContainer" containerID="b3531dbc69ff57f8c6aea658e21db3f0d9b11411c11e5dea409f166daf22056b" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.685946 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.725866 4791 scope.go:117] "RemoveContainer" containerID="5c178396279c61d27e41529cc52280e09a3234fba50b14c4a18f93432598052e" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.795962 4791 scope.go:117] "RemoveContainer" containerID="9cd2c0a8a75f4033983e0d35e14d0964ee728560b945a757583825bd9f8fb7db" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.825072 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.836875 4791 scope.go:117] "RemoveContainer" containerID="9ca55234f7fded51ec0c6b658c7b77583e8d3098a7b8e005247ca2a013d27fa4" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.843331 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-0"] Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.859000 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Feb 18 01:00:06 crc kubenswrapper[4791]: E0218 01:00:06.859589 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="153d9910-d8a5-43d7-8683-dab4c032949b" containerName="aodh-listener" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.859608 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="153d9910-d8a5-43d7-8683-dab4c032949b" containerName="aodh-listener" Feb 18 01:00:06 crc kubenswrapper[4791]: E0218 01:00:06.859631 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="153d9910-d8a5-43d7-8683-dab4c032949b" containerName="aodh-notifier" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.859638 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="153d9910-d8a5-43d7-8683-dab4c032949b" containerName="aodh-notifier" Feb 18 01:00:06 crc kubenswrapper[4791]: E0218 01:00:06.859658 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="153d9910-d8a5-43d7-8683-dab4c032949b" containerName="aodh-api" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.859677 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="153d9910-d8a5-43d7-8683-dab4c032949b" containerName="aodh-api" Feb 18 01:00:06 crc kubenswrapper[4791]: E0218 01:00:06.859695 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31dc1b9b-9b55-465e-ac36-794af7e2e0bd" containerName="collect-profiles" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.859702 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="31dc1b9b-9b55-465e-ac36-794af7e2e0bd" containerName="collect-profiles" Feb 18 01:00:06 crc kubenswrapper[4791]: E0218 01:00:06.859713 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="153d9910-d8a5-43d7-8683-dab4c032949b" containerName="aodh-evaluator" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.859718 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="153d9910-d8a5-43d7-8683-dab4c032949b" containerName="aodh-evaluator" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.859936 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="153d9910-d8a5-43d7-8683-dab4c032949b" containerName="aodh-notifier" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.859968 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="31dc1b9b-9b55-465e-ac36-794af7e2e0bd" containerName="collect-profiles" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.859984 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="153d9910-d8a5-43d7-8683-dab4c032949b" 
containerName="aodh-listener" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.859997 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="153d9910-d8a5-43d7-8683-dab4c032949b" containerName="aodh-evaluator" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.860006 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="153d9910-d8a5-43d7-8683-dab4c032949b" containerName="aodh-api" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.862125 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.869875 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-public-svc" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.870240 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.870418 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.870628 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-n4w9p" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.874607 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-internal-svc" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.876752 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.962265 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="c00433e0-9dce-47aa-a1b5-f4876de61b2e" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.1.10:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.962302 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="c00433e0-9dce-47aa-a1b5-f4876de61b2e" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.1.10:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.965979 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0ccf0634-a041-40a2-9213-614aac6f82c4-public-tls-certs\") pod \"aodh-0\" (UID: \"0ccf0634-a041-40a2-9213-614aac6f82c4\") " pod="openstack/aodh-0" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.966026 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ccf0634-a041-40a2-9213-614aac6f82c4-combined-ca-bundle\") pod \"aodh-0\" (UID: \"0ccf0634-a041-40a2-9213-614aac6f82c4\") " pod="openstack/aodh-0" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.966105 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8qdb7\" (UniqueName: \"kubernetes.io/projected/0ccf0634-a041-40a2-9213-614aac6f82c4-kube-api-access-8qdb7\") pod \"aodh-0\" (UID: \"0ccf0634-a041-40a2-9213-614aac6f82c4\") " pod="openstack/aodh-0" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.966186 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0ccf0634-a041-40a2-9213-614aac6f82c4-internal-tls-certs\") pod \"aodh-0\" (UID: \"0ccf0634-a041-40a2-9213-614aac6f82c4\") " pod="openstack/aodh-0" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.966235 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ccf0634-a041-40a2-9213-614aac6f82c4-config-data\") pod \"aodh-0\" (UID: \"0ccf0634-a041-40a2-9213-614aac6f82c4\") " pod="openstack/aodh-0" Feb 18 01:00:06 crc kubenswrapper[4791]: I0218 01:00:06.966294 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0ccf0634-a041-40a2-9213-614aac6f82c4-scripts\") pod \"aodh-0\" (UID: \"0ccf0634-a041-40a2-9213-614aac6f82c4\") " pod="openstack/aodh-0" Feb 18 01:00:07 crc kubenswrapper[4791]: I0218 01:00:07.067303 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8qdb7\" (UniqueName: \"kubernetes.io/projected/0ccf0634-a041-40a2-9213-614aac6f82c4-kube-api-access-8qdb7\") pod \"aodh-0\" (UID: \"0ccf0634-a041-40a2-9213-614aac6f82c4\") " pod="openstack/aodh-0" Feb 18 01:00:07 crc kubenswrapper[4791]: I0218 01:00:07.067366 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0ccf0634-a041-40a2-9213-614aac6f82c4-internal-tls-certs\") pod \"aodh-0\" (UID: \"0ccf0634-a041-40a2-9213-614aac6f82c4\") " pod="openstack/aodh-0" Feb 18 01:00:07 crc kubenswrapper[4791]: I0218 01:00:07.067408 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ccf0634-a041-40a2-9213-614aac6f82c4-config-data\") pod \"aodh-0\" (UID: \"0ccf0634-a041-40a2-9213-614aac6f82c4\") " pod="openstack/aodh-0" Feb 18 01:00:07 crc kubenswrapper[4791]: I0218 01:00:07.067458 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0ccf0634-a041-40a2-9213-614aac6f82c4-scripts\") pod \"aodh-0\" (UID: \"0ccf0634-a041-40a2-9213-614aac6f82c4\") " pod="openstack/aodh-0" Feb 18 01:00:07 crc kubenswrapper[4791]: I0218 01:00:07.067526 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0ccf0634-a041-40a2-9213-614aac6f82c4-public-tls-certs\") pod \"aodh-0\" (UID: \"0ccf0634-a041-40a2-9213-614aac6f82c4\") " pod="openstack/aodh-0" Feb 18 01:00:07 crc kubenswrapper[4791]: I0218 01:00:07.067555 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ccf0634-a041-40a2-9213-614aac6f82c4-combined-ca-bundle\") pod \"aodh-0\" (UID: \"0ccf0634-a041-40a2-9213-614aac6f82c4\") " pod="openstack/aodh-0" Feb 18 01:00:07 crc kubenswrapper[4791]: I0218 01:00:07.071816 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0ccf0634-a041-40a2-9213-614aac6f82c4-scripts\") pod \"aodh-0\" (UID: \"0ccf0634-a041-40a2-9213-614aac6f82c4\") " pod="openstack/aodh-0" Feb 18 01:00:07 crc kubenswrapper[4791]: I0218 01:00:07.071985 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/0ccf0634-a041-40a2-9213-614aac6f82c4-combined-ca-bundle\") pod \"aodh-0\" (UID: \"0ccf0634-a041-40a2-9213-614aac6f82c4\") " pod="openstack/aodh-0" Feb 18 01:00:07 crc kubenswrapper[4791]: I0218 01:00:07.072313 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0ccf0634-a041-40a2-9213-614aac6f82c4-internal-tls-certs\") pod \"aodh-0\" (UID: \"0ccf0634-a041-40a2-9213-614aac6f82c4\") " pod="openstack/aodh-0" Feb 18 01:00:07 crc kubenswrapper[4791]: I0218 01:00:07.073955 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ccf0634-a041-40a2-9213-614aac6f82c4-config-data\") pod \"aodh-0\" (UID: \"0ccf0634-a041-40a2-9213-614aac6f82c4\") " pod="openstack/aodh-0" Feb 18 01:00:07 crc kubenswrapper[4791]: I0218 01:00:07.075078 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0ccf0634-a041-40a2-9213-614aac6f82c4-public-tls-certs\") pod \"aodh-0\" (UID: \"0ccf0634-a041-40a2-9213-614aac6f82c4\") " pod="openstack/aodh-0" Feb 18 01:00:07 crc kubenswrapper[4791]: I0218 01:00:07.075196 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="153d9910-d8a5-43d7-8683-dab4c032949b" path="/var/lib/kubelet/pods/153d9910-d8a5-43d7-8683-dab4c032949b/volumes" Feb 18 01:00:07 crc kubenswrapper[4791]: I0218 01:00:07.091506 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8qdb7\" (UniqueName: \"kubernetes.io/projected/0ccf0634-a041-40a2-9213-614aac6f82c4-kube-api-access-8qdb7\") pod \"aodh-0\" (UID: \"0ccf0634-a041-40a2-9213-614aac6f82c4\") " pod="openstack/aodh-0" Feb 18 01:00:07 crc kubenswrapper[4791]: I0218 01:00:07.191734 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Feb 18 01:00:07 crc kubenswrapper[4791]: I0218 01:00:07.669992 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Feb 18 01:00:07 crc kubenswrapper[4791]: W0218 01:00:07.678373 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0ccf0634_a041_40a2_9213_614aac6f82c4.slice/crio-5c1c8cca70494767ac6df8b2ac275020e022527004eec2e7ed061408bf52808b WatchSource:0}: Error finding container 5c1c8cca70494767ac6df8b2ac275020e022527004eec2e7ed061408bf52808b: Status 404 returned error can't find the container with id 5c1c8cca70494767ac6df8b2ac275020e022527004eec2e7ed061408bf52808b Feb 18 01:00:07 crc kubenswrapper[4791]: I0218 01:00:07.698042 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"0ccf0634-a041-40a2-9213-614aac6f82c4","Type":"ContainerStarted","Data":"5c1c8cca70494767ac6df8b2ac275020e022527004eec2e7ed061408bf52808b"} Feb 18 01:00:08 crc kubenswrapper[4791]: I0218 01:00:08.709061 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"0ccf0634-a041-40a2-9213-614aac6f82c4","Type":"ContainerStarted","Data":"87e3773720b22c8fc7f465fc0dd27d482ebe4b35c31ce6c6d4de6fbbfcf00e0d"} Feb 18 01:00:09 crc kubenswrapper[4791]: I0218 01:00:09.722972 4791 generic.go:334] "Generic (PLEG): container finished" podID="7486cd5a-bf0d-4162-95ff-505011bcbda3" containerID="48f24dd266741d954f6136b6dbf7292affff9b6f8cc7be3be601e1e1c0ed7021" exitCode=137 Feb 18 01:00:09 crc kubenswrapper[4791]: I0218 01:00:09.723176 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7486cd5a-bf0d-4162-95ff-505011bcbda3","Type":"ContainerDied","Data":"48f24dd266741d954f6136b6dbf7292affff9b6f8cc7be3be601e1e1c0ed7021"} Feb 18 01:00:09 crc kubenswrapper[4791]: I0218 01:00:09.724971 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7486cd5a-bf0d-4162-95ff-505011bcbda3","Type":"ContainerDied","Data":"a782621fdafa86320a197e75bcb58ac417d1f4b2d2197eed5eddbd639454425a"} Feb 18 01:00:09 crc kubenswrapper[4791]: I0218 01:00:09.725088 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a782621fdafa86320a197e75bcb58ac417d1f4b2d2197eed5eddbd639454425a" Feb 18 01:00:09 crc kubenswrapper[4791]: I0218 01:00:09.726886 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"0ccf0634-a041-40a2-9213-614aac6f82c4","Type":"ContainerStarted","Data":"5d3eb44f28feda92c7e200e2b34f24f8f07579698585e63ebd247b2fc47a5e07"} Feb 18 01:00:09 crc kubenswrapper[4791]: I0218 01:00:09.757780 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 18 01:00:09 crc kubenswrapper[4791]: I0218 01:00:09.838919 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7486cd5a-bf0d-4162-95ff-505011bcbda3-config-data\") pod \"7486cd5a-bf0d-4162-95ff-505011bcbda3\" (UID: \"7486cd5a-bf0d-4162-95ff-505011bcbda3\") " Feb 18 01:00:09 crc kubenswrapper[4791]: I0218 01:00:09.839004 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7486cd5a-bf0d-4162-95ff-505011bcbda3-combined-ca-bundle\") pod \"7486cd5a-bf0d-4162-95ff-505011bcbda3\" (UID: \"7486cd5a-bf0d-4162-95ff-505011bcbda3\") " Feb 18 01:00:09 crc kubenswrapper[4791]: I0218 01:00:09.839035 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7486cd5a-bf0d-4162-95ff-505011bcbda3-scripts\") pod \"7486cd5a-bf0d-4162-95ff-505011bcbda3\" (UID: \"7486cd5a-bf0d-4162-95ff-505011bcbda3\") " Feb 18 01:00:09 crc kubenswrapper[4791]: I0218 01:00:09.839149 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lmvl7\" (UniqueName: \"kubernetes.io/projected/7486cd5a-bf0d-4162-95ff-505011bcbda3-kube-api-access-lmvl7\") pod \"7486cd5a-bf0d-4162-95ff-505011bcbda3\" (UID: \"7486cd5a-bf0d-4162-95ff-505011bcbda3\") " Feb 18 01:00:09 crc kubenswrapper[4791]: I0218 01:00:09.839219 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7486cd5a-bf0d-4162-95ff-505011bcbda3-sg-core-conf-yaml\") pod \"7486cd5a-bf0d-4162-95ff-505011bcbda3\" (UID: \"7486cd5a-bf0d-4162-95ff-505011bcbda3\") " Feb 18 01:00:09 crc kubenswrapper[4791]: I0218 01:00:09.839263 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7486cd5a-bf0d-4162-95ff-505011bcbda3-log-httpd\") pod \"7486cd5a-bf0d-4162-95ff-505011bcbda3\" (UID: \"7486cd5a-bf0d-4162-95ff-505011bcbda3\") " Feb 18 01:00:09 crc kubenswrapper[4791]: I0218 01:00:09.839443 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7486cd5a-bf0d-4162-95ff-505011bcbda3-run-httpd\") pod \"7486cd5a-bf0d-4162-95ff-505011bcbda3\" (UID: \"7486cd5a-bf0d-4162-95ff-505011bcbda3\") " Feb 18 01:00:09 crc kubenswrapper[4791]: I0218 01:00:09.840056 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7486cd5a-bf0d-4162-95ff-505011bcbda3-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "7486cd5a-bf0d-4162-95ff-505011bcbda3" (UID: "7486cd5a-bf0d-4162-95ff-505011bcbda3"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:00:09 crc kubenswrapper[4791]: I0218 01:00:09.840593 4791 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7486cd5a-bf0d-4162-95ff-505011bcbda3-log-httpd\") on node \"crc\" DevicePath \"\"" Feb 18 01:00:09 crc kubenswrapper[4791]: I0218 01:00:09.843435 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7486cd5a-bf0d-4162-95ff-505011bcbda3-scripts" (OuterVolumeSpecName: "scripts") pod "7486cd5a-bf0d-4162-95ff-505011bcbda3" (UID: "7486cd5a-bf0d-4162-95ff-505011bcbda3"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:00:09 crc kubenswrapper[4791]: I0218 01:00:09.843770 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7486cd5a-bf0d-4162-95ff-505011bcbda3-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "7486cd5a-bf0d-4162-95ff-505011bcbda3" (UID: "7486cd5a-bf0d-4162-95ff-505011bcbda3"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:00:09 crc kubenswrapper[4791]: I0218 01:00:09.849830 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7486cd5a-bf0d-4162-95ff-505011bcbda3-kube-api-access-lmvl7" (OuterVolumeSpecName: "kube-api-access-lmvl7") pod "7486cd5a-bf0d-4162-95ff-505011bcbda3" (UID: "7486cd5a-bf0d-4162-95ff-505011bcbda3"). InnerVolumeSpecName "kube-api-access-lmvl7". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:00:09 crc kubenswrapper[4791]: I0218 01:00:09.907852 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7486cd5a-bf0d-4162-95ff-505011bcbda3-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "7486cd5a-bf0d-4162-95ff-505011bcbda3" (UID: "7486cd5a-bf0d-4162-95ff-505011bcbda3"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:00:09 crc kubenswrapper[4791]: I0218 01:00:09.943381 4791 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7486cd5a-bf0d-4162-95ff-505011bcbda3-run-httpd\") on node \"crc\" DevicePath \"\"" Feb 18 01:00:09 crc kubenswrapper[4791]: I0218 01:00:09.943752 4791 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7486cd5a-bf0d-4162-95ff-505011bcbda3-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 01:00:09 crc kubenswrapper[4791]: I0218 01:00:09.943764 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lmvl7\" (UniqueName: \"kubernetes.io/projected/7486cd5a-bf0d-4162-95ff-505011bcbda3-kube-api-access-lmvl7\") on node \"crc\" DevicePath \"\"" Feb 18 01:00:09 crc kubenswrapper[4791]: I0218 01:00:09.943777 4791 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7486cd5a-bf0d-4162-95ff-505011bcbda3-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Feb 18 01:00:09 crc kubenswrapper[4791]: I0218 01:00:09.977844 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7486cd5a-bf0d-4162-95ff-505011bcbda3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7486cd5a-bf0d-4162-95ff-505011bcbda3" (UID: "7486cd5a-bf0d-4162-95ff-505011bcbda3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:00:09 crc kubenswrapper[4791]: I0218 01:00:09.993277 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7486cd5a-bf0d-4162-95ff-505011bcbda3-config-data" (OuterVolumeSpecName: "config-data") pod "7486cd5a-bf0d-4162-95ff-505011bcbda3" (UID: "7486cd5a-bf0d-4162-95ff-505011bcbda3"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:00:10 crc kubenswrapper[4791]: I0218 01:00:10.046256 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7486cd5a-bf0d-4162-95ff-505011bcbda3-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 01:00:10 crc kubenswrapper[4791]: I0218 01:00:10.046290 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7486cd5a-bf0d-4162-95ff-505011bcbda3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 01:00:10 crc kubenswrapper[4791]: I0218 01:00:10.745312 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 18 01:00:10 crc kubenswrapper[4791]: I0218 01:00:10.751761 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"0ccf0634-a041-40a2-9213-614aac6f82c4","Type":"ContainerStarted","Data":"5356617615d29a5c3b6249816f25e770477b94549526dd4fc96fad5f5c0ccdd4"} Feb 18 01:00:10 crc kubenswrapper[4791]: I0218 01:00:10.752126 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"0ccf0634-a041-40a2-9213-614aac6f82c4","Type":"ContainerStarted","Data":"68bdb890becad6c5141c79033f062e15346db2918bee21f814147ba50088fffb"} Feb 18 01:00:10 crc kubenswrapper[4791]: I0218 01:00:10.803902 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=2.220694293 podStartE2EDuration="4.803878878s" podCreationTimestamp="2026-02-18 01:00:06 +0000 UTC" firstStartedPulling="2026-02-18 01:00:07.680784533 +0000 UTC m=+1549.248797693" lastFinishedPulling="2026-02-18 01:00:10.263969108 +0000 UTC m=+1551.831982278" observedRunningTime="2026-02-18 01:00:10.800247145 +0000 UTC m=+1552.368260325" watchObservedRunningTime="2026-02-18 01:00:10.803878878 +0000 UTC m=+1552.371892058" Feb 18 01:00:10 crc kubenswrapper[4791]: I0218 01:00:10.848281 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 18 01:00:10 crc kubenswrapper[4791]: I0218 01:00:10.865594 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Feb 18 01:00:10 crc kubenswrapper[4791]: I0218 01:00:10.901722 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 18 01:00:10 crc kubenswrapper[4791]: E0218 01:00:10.902458 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7486cd5a-bf0d-4162-95ff-505011bcbda3" containerName="proxy-httpd" Feb 18 01:00:10 crc kubenswrapper[4791]: I0218 01:00:10.902482 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="7486cd5a-bf0d-4162-95ff-505011bcbda3" containerName="proxy-httpd" Feb 18 01:00:10 crc kubenswrapper[4791]: E0218 01:00:10.902504 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7486cd5a-bf0d-4162-95ff-505011bcbda3" containerName="ceilometer-notification-agent" Feb 18 01:00:10 crc kubenswrapper[4791]: I0218 01:00:10.902513 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="7486cd5a-bf0d-4162-95ff-505011bcbda3" containerName="ceilometer-notification-agent" Feb 18 01:00:10 crc kubenswrapper[4791]: E0218 01:00:10.902533 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7486cd5a-bf0d-4162-95ff-505011bcbda3" containerName="sg-core" Feb 18 01:00:10 crc kubenswrapper[4791]: I0218 01:00:10.902543 4791 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="7486cd5a-bf0d-4162-95ff-505011bcbda3" containerName="sg-core" Feb 18 01:00:10 crc kubenswrapper[4791]: E0218 01:00:10.902578 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7486cd5a-bf0d-4162-95ff-505011bcbda3" containerName="ceilometer-central-agent" Feb 18 01:00:10 crc kubenswrapper[4791]: I0218 01:00:10.902587 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="7486cd5a-bf0d-4162-95ff-505011bcbda3" containerName="ceilometer-central-agent" Feb 18 01:00:10 crc kubenswrapper[4791]: I0218 01:00:10.902855 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="7486cd5a-bf0d-4162-95ff-505011bcbda3" containerName="ceilometer-notification-agent" Feb 18 01:00:10 crc kubenswrapper[4791]: I0218 01:00:10.902877 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="7486cd5a-bf0d-4162-95ff-505011bcbda3" containerName="ceilometer-central-agent" Feb 18 01:00:10 crc kubenswrapper[4791]: I0218 01:00:10.902894 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="7486cd5a-bf0d-4162-95ff-505011bcbda3" containerName="sg-core" Feb 18 01:00:10 crc kubenswrapper[4791]: I0218 01:00:10.902920 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="7486cd5a-bf0d-4162-95ff-505011bcbda3" containerName="proxy-httpd" Feb 18 01:00:10 crc kubenswrapper[4791]: I0218 01:00:10.917854 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 18 01:00:10 crc kubenswrapper[4791]: I0218 01:00:10.924792 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 18 01:00:10 crc kubenswrapper[4791]: I0218 01:00:10.926545 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 18 01:00:10 crc kubenswrapper[4791]: I0218 01:00:10.937778 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 18 01:00:10 crc kubenswrapper[4791]: I0218 01:00:10.971445 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1bac8251-a5b5-4029-b90a-275ee65ea0db-scripts\") pod \"ceilometer-0\" (UID: \"1bac8251-a5b5-4029-b90a-275ee65ea0db\") " pod="openstack/ceilometer-0" Feb 18 01:00:10 crc kubenswrapper[4791]: I0218 01:00:10.971549 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1bac8251-a5b5-4029-b90a-275ee65ea0db-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1bac8251-a5b5-4029-b90a-275ee65ea0db\") " pod="openstack/ceilometer-0" Feb 18 01:00:10 crc kubenswrapper[4791]: I0218 01:00:10.971596 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1bac8251-a5b5-4029-b90a-275ee65ea0db-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1bac8251-a5b5-4029-b90a-275ee65ea0db\") " pod="openstack/ceilometer-0" Feb 18 01:00:10 crc kubenswrapper[4791]: I0218 01:00:10.971661 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-774t7\" (UniqueName: \"kubernetes.io/projected/1bac8251-a5b5-4029-b90a-275ee65ea0db-kube-api-access-774t7\") pod \"ceilometer-0\" (UID: \"1bac8251-a5b5-4029-b90a-275ee65ea0db\") " pod="openstack/ceilometer-0" Feb 18 01:00:10 crc kubenswrapper[4791]: I0218 01:00:10.971684 4791 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1bac8251-a5b5-4029-b90a-275ee65ea0db-config-data\") pod \"ceilometer-0\" (UID: \"1bac8251-a5b5-4029-b90a-275ee65ea0db\") " pod="openstack/ceilometer-0" Feb 18 01:00:10 crc kubenswrapper[4791]: I0218 01:00:10.971727 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1bac8251-a5b5-4029-b90a-275ee65ea0db-log-httpd\") pod \"ceilometer-0\" (UID: \"1bac8251-a5b5-4029-b90a-275ee65ea0db\") " pod="openstack/ceilometer-0" Feb 18 01:00:10 crc kubenswrapper[4791]: I0218 01:00:10.971775 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1bac8251-a5b5-4029-b90a-275ee65ea0db-run-httpd\") pod \"ceilometer-0\" (UID: \"1bac8251-a5b5-4029-b90a-275ee65ea0db\") " pod="openstack/ceilometer-0" Feb 18 01:00:11 crc kubenswrapper[4791]: I0218 01:00:11.073553 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1bac8251-a5b5-4029-b90a-275ee65ea0db-scripts\") pod \"ceilometer-0\" (UID: \"1bac8251-a5b5-4029-b90a-275ee65ea0db\") " pod="openstack/ceilometer-0" Feb 18 01:00:11 crc kubenswrapper[4791]: I0218 01:00:11.073639 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1bac8251-a5b5-4029-b90a-275ee65ea0db-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1bac8251-a5b5-4029-b90a-275ee65ea0db\") " pod="openstack/ceilometer-0" Feb 18 01:00:11 crc kubenswrapper[4791]: I0218 01:00:11.073683 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1bac8251-a5b5-4029-b90a-275ee65ea0db-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1bac8251-a5b5-4029-b90a-275ee65ea0db\") " pod="openstack/ceilometer-0" Feb 18 01:00:11 crc kubenswrapper[4791]: I0218 01:00:11.073744 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-774t7\" (UniqueName: \"kubernetes.io/projected/1bac8251-a5b5-4029-b90a-275ee65ea0db-kube-api-access-774t7\") pod \"ceilometer-0\" (UID: \"1bac8251-a5b5-4029-b90a-275ee65ea0db\") " pod="openstack/ceilometer-0" Feb 18 01:00:11 crc kubenswrapper[4791]: I0218 01:00:11.073768 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1bac8251-a5b5-4029-b90a-275ee65ea0db-config-data\") pod \"ceilometer-0\" (UID: \"1bac8251-a5b5-4029-b90a-275ee65ea0db\") " pod="openstack/ceilometer-0" Feb 18 01:00:11 crc kubenswrapper[4791]: I0218 01:00:11.073811 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1bac8251-a5b5-4029-b90a-275ee65ea0db-log-httpd\") pod \"ceilometer-0\" (UID: \"1bac8251-a5b5-4029-b90a-275ee65ea0db\") " pod="openstack/ceilometer-0" Feb 18 01:00:11 crc kubenswrapper[4791]: I0218 01:00:11.073857 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1bac8251-a5b5-4029-b90a-275ee65ea0db-run-httpd\") pod \"ceilometer-0\" (UID: \"1bac8251-a5b5-4029-b90a-275ee65ea0db\") " pod="openstack/ceilometer-0" Feb 18 01:00:11 crc kubenswrapper[4791]: I0218 
01:00:11.074336 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1bac8251-a5b5-4029-b90a-275ee65ea0db-run-httpd\") pod \"ceilometer-0\" (UID: \"1bac8251-a5b5-4029-b90a-275ee65ea0db\") " pod="openstack/ceilometer-0" Feb 18 01:00:11 crc kubenswrapper[4791]: I0218 01:00:11.080913 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7486cd5a-bf0d-4162-95ff-505011bcbda3" path="/var/lib/kubelet/pods/7486cd5a-bf0d-4162-95ff-505011bcbda3/volumes" Feb 18 01:00:11 crc kubenswrapper[4791]: I0218 01:00:11.082993 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1bac8251-a5b5-4029-b90a-275ee65ea0db-log-httpd\") pod \"ceilometer-0\" (UID: \"1bac8251-a5b5-4029-b90a-275ee65ea0db\") " pod="openstack/ceilometer-0" Feb 18 01:00:11 crc kubenswrapper[4791]: I0218 01:00:11.084012 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1bac8251-a5b5-4029-b90a-275ee65ea0db-scripts\") pod \"ceilometer-0\" (UID: \"1bac8251-a5b5-4029-b90a-275ee65ea0db\") " pod="openstack/ceilometer-0" Feb 18 01:00:11 crc kubenswrapper[4791]: I0218 01:00:11.086038 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1bac8251-a5b5-4029-b90a-275ee65ea0db-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1bac8251-a5b5-4029-b90a-275ee65ea0db\") " pod="openstack/ceilometer-0" Feb 18 01:00:11 crc kubenswrapper[4791]: I0218 01:00:11.094578 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1bac8251-a5b5-4029-b90a-275ee65ea0db-config-data\") pod \"ceilometer-0\" (UID: \"1bac8251-a5b5-4029-b90a-275ee65ea0db\") " pod="openstack/ceilometer-0" Feb 18 01:00:11 crc kubenswrapper[4791]: I0218 01:00:11.103008 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1bac8251-a5b5-4029-b90a-275ee65ea0db-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1bac8251-a5b5-4029-b90a-275ee65ea0db\") " pod="openstack/ceilometer-0" Feb 18 01:00:11 crc kubenswrapper[4791]: I0218 01:00:11.132826 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-774t7\" (UniqueName: \"kubernetes.io/projected/1bac8251-a5b5-4029-b90a-275ee65ea0db-kube-api-access-774t7\") pod \"ceilometer-0\" (UID: \"1bac8251-a5b5-4029-b90a-275ee65ea0db\") " pod="openstack/ceilometer-0" Feb 18 01:00:11 crc kubenswrapper[4791]: I0218 01:00:11.266072 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 18 01:00:11 crc kubenswrapper[4791]: I0218 01:00:11.863316 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 18 01:00:11 crc kubenswrapper[4791]: W0218 01:00:11.877919 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1bac8251_a5b5_4029_b90a_275ee65ea0db.slice/crio-63589069182b73675c7ddfd2083b7271f644721e0eed9c747e2c835e33e85e87 WatchSource:0}: Error finding container 63589069182b73675c7ddfd2083b7271f644721e0eed9c747e2c835e33e85e87: Status 404 returned error can't find the container with id 63589069182b73675c7ddfd2083b7271f644721e0eed9c747e2c835e33e85e87 Feb 18 01:00:12 crc kubenswrapper[4791]: I0218 01:00:12.767145 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1bac8251-a5b5-4029-b90a-275ee65ea0db","Type":"ContainerStarted","Data":"76e7f5ccba30445125a705538ce03d818ad2cbba026143d9957e2e2f84e501ae"} Feb 18 01:00:12 crc kubenswrapper[4791]: I0218 01:00:12.767480 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1bac8251-a5b5-4029-b90a-275ee65ea0db","Type":"ContainerStarted","Data":"63589069182b73675c7ddfd2083b7271f644721e0eed9c747e2c835e33e85e87"} Feb 18 01:00:13 crc kubenswrapper[4791]: I0218 01:00:13.778836 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1bac8251-a5b5-4029-b90a-275ee65ea0db","Type":"ContainerStarted","Data":"e03ed6a774d632465dcd6428aa14a659b86f77b37402ae80beb65411ddf80138"} Feb 18 01:00:14 crc kubenswrapper[4791]: I0218 01:00:14.792570 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1bac8251-a5b5-4029-b90a-275ee65ea0db","Type":"ContainerStarted","Data":"4e213a726ac093bb5f85732ee2304349ddb999b1839ceabfc8df605180016a9e"} Feb 18 01:00:15 crc kubenswrapper[4791]: I0218 01:00:15.322044 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Feb 18 01:00:15 crc kubenswrapper[4791]: I0218 01:00:15.323689 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Feb 18 01:00:15 crc kubenswrapper[4791]: I0218 01:00:15.327846 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Feb 18 01:00:15 crc kubenswrapper[4791]: I0218 01:00:15.807004 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Feb 18 01:00:15 crc kubenswrapper[4791]: I0218 01:00:15.964030 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Feb 18 01:00:15 crc kubenswrapper[4791]: I0218 01:00:15.964564 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Feb 18 01:00:15 crc kubenswrapper[4791]: I0218 01:00:15.965816 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Feb 18 01:00:15 crc kubenswrapper[4791]: I0218 01:00:15.976305 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Feb 18 01:00:16 crc kubenswrapper[4791]: I0218 01:00:16.823453 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Feb 18 01:00:16 crc kubenswrapper[4791]: I0218 01:00:16.832323 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openstack/nova-api-0" Feb 18 01:00:21 crc kubenswrapper[4791]: I0218 01:00:21.889773 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1bac8251-a5b5-4029-b90a-275ee65ea0db","Type":"ContainerStarted","Data":"15dec771ce35a432f634f8a002475ebd821455bb3a095712137fff8634855098"} Feb 18 01:00:21 crc kubenswrapper[4791]: I0218 01:00:21.890428 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Feb 18 01:00:21 crc kubenswrapper[4791]: I0218 01:00:21.907233 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.508917562 podStartE2EDuration="11.907217064s" podCreationTimestamp="2026-02-18 01:00:10 +0000 UTC" firstStartedPulling="2026-02-18 01:00:11.880685772 +0000 UTC m=+1553.448698942" lastFinishedPulling="2026-02-18 01:00:21.278985234 +0000 UTC m=+1562.846998444" observedRunningTime="2026-02-18 01:00:21.905973896 +0000 UTC m=+1563.473987086" watchObservedRunningTime="2026-02-18 01:00:21.907217064 +0000 UTC m=+1563.475230234" Feb 18 01:00:41 crc kubenswrapper[4791]: I0218 01:00:41.272728 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Feb 18 01:00:44 crc kubenswrapper[4791]: I0218 01:00:44.858752 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Feb 18 01:00:44 crc kubenswrapper[4791]: I0218 01:00:44.861096 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="ccdd7e1f-2546-4f46-93cd-7e07d3db2182" containerName="kube-state-metrics" containerID="cri-o://8e630aef482dec524705a367b2c2c8fe281886e8f8a67d1cec35a97cced6e80a" gracePeriod=30 Feb 18 01:00:44 crc kubenswrapper[4791]: I0218 01:00:44.974129 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-0"] Feb 18 01:00:44 crc kubenswrapper[4791]: I0218 01:00:44.974688 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/mysqld-exporter-0" podUID="797178cc-4ac7-4338-88d7-b80656cd9566" containerName="mysqld-exporter" containerID="cri-o://2b91226754d78ba69b8f9a7179313ca25dfd26860d09756a8959ba659a128373" gracePeriod=30 Feb 18 01:00:45 crc kubenswrapper[4791]: I0218 01:00:45.206200 4791 generic.go:334] "Generic (PLEG): container finished" podID="ccdd7e1f-2546-4f46-93cd-7e07d3db2182" containerID="8e630aef482dec524705a367b2c2c8fe281886e8f8a67d1cec35a97cced6e80a" exitCode=2 Feb 18 01:00:45 crc kubenswrapper[4791]: I0218 01:00:45.206261 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"ccdd7e1f-2546-4f46-93cd-7e07d3db2182","Type":"ContainerDied","Data":"8e630aef482dec524705a367b2c2c8fe281886e8f8a67d1cec35a97cced6e80a"} Feb 18 01:00:45 crc kubenswrapper[4791]: I0218 01:00:45.210420 4791 generic.go:334] "Generic (PLEG): container finished" podID="797178cc-4ac7-4338-88d7-b80656cd9566" containerID="2b91226754d78ba69b8f9a7179313ca25dfd26860d09756a8959ba659a128373" exitCode=2 Feb 18 01:00:45 crc kubenswrapper[4791]: I0218 01:00:45.210471 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"797178cc-4ac7-4338-88d7-b80656cd9566","Type":"ContainerDied","Data":"2b91226754d78ba69b8f9a7179313ca25dfd26860d09756a8959ba659a128373"} Feb 18 01:00:45 crc kubenswrapper[4791]: I0218 01:00:45.608178 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Feb 18 01:00:45 crc kubenswrapper[4791]: I0218 01:00:45.713870 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Feb 18 01:00:45 crc kubenswrapper[4791]: I0218 01:00:45.737233 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jdgqh\" (UniqueName: \"kubernetes.io/projected/ccdd7e1f-2546-4f46-93cd-7e07d3db2182-kube-api-access-jdgqh\") pod \"ccdd7e1f-2546-4f46-93cd-7e07d3db2182\" (UID: \"ccdd7e1f-2546-4f46-93cd-7e07d3db2182\") " Feb 18 01:00:45 crc kubenswrapper[4791]: I0218 01:00:45.746389 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ccdd7e1f-2546-4f46-93cd-7e07d3db2182-kube-api-access-jdgqh" (OuterVolumeSpecName: "kube-api-access-jdgqh") pod "ccdd7e1f-2546-4f46-93cd-7e07d3db2182" (UID: "ccdd7e1f-2546-4f46-93cd-7e07d3db2182"). InnerVolumeSpecName "kube-api-access-jdgqh". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:00:45 crc kubenswrapper[4791]: I0218 01:00:45.839325 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/797178cc-4ac7-4338-88d7-b80656cd9566-config-data\") pod \"797178cc-4ac7-4338-88d7-b80656cd9566\" (UID: \"797178cc-4ac7-4338-88d7-b80656cd9566\") " Feb 18 01:00:45 crc kubenswrapper[4791]: I0218 01:00:45.839478 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k62jd\" (UniqueName: \"kubernetes.io/projected/797178cc-4ac7-4338-88d7-b80656cd9566-kube-api-access-k62jd\") pod \"797178cc-4ac7-4338-88d7-b80656cd9566\" (UID: \"797178cc-4ac7-4338-88d7-b80656cd9566\") " Feb 18 01:00:45 crc kubenswrapper[4791]: I0218 01:00:45.839550 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/797178cc-4ac7-4338-88d7-b80656cd9566-combined-ca-bundle\") pod \"797178cc-4ac7-4338-88d7-b80656cd9566\" (UID: \"797178cc-4ac7-4338-88d7-b80656cd9566\") " Feb 18 01:00:45 crc kubenswrapper[4791]: I0218 01:00:45.840059 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jdgqh\" (UniqueName: \"kubernetes.io/projected/ccdd7e1f-2546-4f46-93cd-7e07d3db2182-kube-api-access-jdgqh\") on node \"crc\" DevicePath \"\"" Feb 18 01:00:45 crc kubenswrapper[4791]: I0218 01:00:45.843341 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/797178cc-4ac7-4338-88d7-b80656cd9566-kube-api-access-k62jd" (OuterVolumeSpecName: "kube-api-access-k62jd") pod "797178cc-4ac7-4338-88d7-b80656cd9566" (UID: "797178cc-4ac7-4338-88d7-b80656cd9566"). InnerVolumeSpecName "kube-api-access-k62jd". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:00:45 crc kubenswrapper[4791]: I0218 01:00:45.882314 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/797178cc-4ac7-4338-88d7-b80656cd9566-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "797178cc-4ac7-4338-88d7-b80656cd9566" (UID: "797178cc-4ac7-4338-88d7-b80656cd9566"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:00:45 crc kubenswrapper[4791]: I0218 01:00:45.895819 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/797178cc-4ac7-4338-88d7-b80656cd9566-config-data" (OuterVolumeSpecName: "config-data") pod "797178cc-4ac7-4338-88d7-b80656cd9566" (UID: "797178cc-4ac7-4338-88d7-b80656cd9566"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:00:45 crc kubenswrapper[4791]: I0218 01:00:45.941631 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k62jd\" (UniqueName: \"kubernetes.io/projected/797178cc-4ac7-4338-88d7-b80656cd9566-kube-api-access-k62jd\") on node \"crc\" DevicePath \"\"" Feb 18 01:00:45 crc kubenswrapper[4791]: I0218 01:00:45.941661 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/797178cc-4ac7-4338-88d7-b80656cd9566-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 01:00:45 crc kubenswrapper[4791]: I0218 01:00:45.941673 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/797178cc-4ac7-4338-88d7-b80656cd9566-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.222638 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"797178cc-4ac7-4338-88d7-b80656cd9566","Type":"ContainerDied","Data":"eb0c8cd7e6f8d03a2d0f5e52cef8f62a5846398fdf5b2058114ae0fbe5103ab9"} Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.222678 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.222699 4791 scope.go:117] "RemoveContainer" containerID="2b91226754d78ba69b8f9a7179313ca25dfd26860d09756a8959ba659a128373" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.225087 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"ccdd7e1f-2546-4f46-93cd-7e07d3db2182","Type":"ContainerDied","Data":"b27828fe159a614bd83755a6997f3a5c339d879bb29f9784ec44c8423ded16eb"} Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.225133 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.289363 4791 scope.go:117] "RemoveContainer" containerID="8e630aef482dec524705a367b2c2c8fe281886e8f8a67d1cec35a97cced6e80a" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.299770 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-0"] Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.325545 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-0"] Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.340119 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.356589 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.367869 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-0"] Feb 18 01:00:46 crc kubenswrapper[4791]: E0218 01:00:46.368452 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ccdd7e1f-2546-4f46-93cd-7e07d3db2182" containerName="kube-state-metrics" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.368465 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="ccdd7e1f-2546-4f46-93cd-7e07d3db2182" containerName="kube-state-metrics" Feb 18 01:00:46 crc kubenswrapper[4791]: E0218 01:00:46.368711 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="797178cc-4ac7-4338-88d7-b80656cd9566" containerName="mysqld-exporter" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.368718 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="797178cc-4ac7-4338-88d7-b80656cd9566" containerName="mysqld-exporter" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.368934 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="ccdd7e1f-2546-4f46-93cd-7e07d3db2182" containerName="kube-state-metrics" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.368946 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="797178cc-4ac7-4338-88d7-b80656cd9566" containerName="mysqld-exporter" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.369837 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.372943 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-mysqld-exporter-svc" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.373127 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-config-data" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.383853 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.385859 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.389513 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.393814 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.407873 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.417602 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.456020 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xgvg4\" (UniqueName: \"kubernetes.io/projected/23f7febf-286b-49f0-a4db-9aada2d4a4d7-kube-api-access-xgvg4\") pod \"kube-state-metrics-0\" (UID: \"23f7febf-286b-49f0-a4db-9aada2d4a4d7\") " pod="openstack/kube-state-metrics-0" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.456174 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cx75f\" (UniqueName: \"kubernetes.io/projected/e0395872-356e-4d2f-ba73-bd4d2feed605-kube-api-access-cx75f\") pod \"mysqld-exporter-0\" (UID: \"e0395872-356e-4d2f-ba73-bd4d2feed605\") " pod="openstack/mysqld-exporter-0" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.456215 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/23f7febf-286b-49f0-a4db-9aada2d4a4d7-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"23f7febf-286b-49f0-a4db-9aada2d4a4d7\") " pod="openstack/kube-state-metrics-0" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.456234 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23f7febf-286b-49f0-a4db-9aada2d4a4d7-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"23f7febf-286b-49f0-a4db-9aada2d4a4d7\") " pod="openstack/kube-state-metrics-0" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.456255 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0395872-356e-4d2f-ba73-bd4d2feed605-config-data\") pod \"mysqld-exporter-0\" (UID: \"e0395872-356e-4d2f-ba73-bd4d2feed605\") " pod="openstack/mysqld-exporter-0" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.456381 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0395872-356e-4d2f-ba73-bd4d2feed605-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"e0395872-356e-4d2f-ba73-bd4d2feed605\") " pod="openstack/mysqld-exporter-0" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.456540 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/e0395872-356e-4d2f-ba73-bd4d2feed605-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"e0395872-356e-4d2f-ba73-bd4d2feed605\") " pod="openstack/mysqld-exporter-0" Feb 18 01:00:46 crc kubenswrapper[4791]: 
I0218 01:00:46.456740 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/23f7febf-286b-49f0-a4db-9aada2d4a4d7-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"23f7febf-286b-49f0-a4db-9aada2d4a4d7\") " pod="openstack/kube-state-metrics-0" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.559722 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/23f7febf-286b-49f0-a4db-9aada2d4a4d7-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"23f7febf-286b-49f0-a4db-9aada2d4a4d7\") " pod="openstack/kube-state-metrics-0" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.559862 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xgvg4\" (UniqueName: \"kubernetes.io/projected/23f7febf-286b-49f0-a4db-9aada2d4a4d7-kube-api-access-xgvg4\") pod \"kube-state-metrics-0\" (UID: \"23f7febf-286b-49f0-a4db-9aada2d4a4d7\") " pod="openstack/kube-state-metrics-0" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.560273 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cx75f\" (UniqueName: \"kubernetes.io/projected/e0395872-356e-4d2f-ba73-bd4d2feed605-kube-api-access-cx75f\") pod \"mysqld-exporter-0\" (UID: \"e0395872-356e-4d2f-ba73-bd4d2feed605\") " pod="openstack/mysqld-exporter-0" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.560304 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/23f7febf-286b-49f0-a4db-9aada2d4a4d7-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"23f7febf-286b-49f0-a4db-9aada2d4a4d7\") " pod="openstack/kube-state-metrics-0" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.560472 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23f7febf-286b-49f0-a4db-9aada2d4a4d7-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"23f7febf-286b-49f0-a4db-9aada2d4a4d7\") " pod="openstack/kube-state-metrics-0" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.560496 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0395872-356e-4d2f-ba73-bd4d2feed605-config-data\") pod \"mysqld-exporter-0\" (UID: \"e0395872-356e-4d2f-ba73-bd4d2feed605\") " pod="openstack/mysqld-exporter-0" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.560531 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0395872-356e-4d2f-ba73-bd4d2feed605-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"e0395872-356e-4d2f-ba73-bd4d2feed605\") " pod="openstack/mysqld-exporter-0" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.560601 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/e0395872-356e-4d2f-ba73-bd4d2feed605-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"e0395872-356e-4d2f-ba73-bd4d2feed605\") " pod="openstack/mysqld-exporter-0" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.564841 4791 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/23f7febf-286b-49f0-a4db-9aada2d4a4d7-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"23f7febf-286b-49f0-a4db-9aada2d4a4d7\") " pod="openstack/kube-state-metrics-0" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.565334 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23f7febf-286b-49f0-a4db-9aada2d4a4d7-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"23f7febf-286b-49f0-a4db-9aada2d4a4d7\") " pod="openstack/kube-state-metrics-0" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.565669 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/e0395872-356e-4d2f-ba73-bd4d2feed605-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"e0395872-356e-4d2f-ba73-bd4d2feed605\") " pod="openstack/mysqld-exporter-0" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.565858 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/23f7febf-286b-49f0-a4db-9aada2d4a4d7-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"23f7febf-286b-49f0-a4db-9aada2d4a4d7\") " pod="openstack/kube-state-metrics-0" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.566331 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0395872-356e-4d2f-ba73-bd4d2feed605-config-data\") pod \"mysqld-exporter-0\" (UID: \"e0395872-356e-4d2f-ba73-bd4d2feed605\") " pod="openstack/mysqld-exporter-0" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.566525 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0395872-356e-4d2f-ba73-bd4d2feed605-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"e0395872-356e-4d2f-ba73-bd4d2feed605\") " pod="openstack/mysqld-exporter-0" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.580643 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xgvg4\" (UniqueName: \"kubernetes.io/projected/23f7febf-286b-49f0-a4db-9aada2d4a4d7-kube-api-access-xgvg4\") pod \"kube-state-metrics-0\" (UID: \"23f7febf-286b-49f0-a4db-9aada2d4a4d7\") " pod="openstack/kube-state-metrics-0" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.582425 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cx75f\" (UniqueName: \"kubernetes.io/projected/e0395872-356e-4d2f-ba73-bd4d2feed605-kube-api-access-cx75f\") pod \"mysqld-exporter-0\" (UID: \"e0395872-356e-4d2f-ba73-bd4d2feed605\") " pod="openstack/mysqld-exporter-0" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.694985 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Feb 18 01:00:46 crc kubenswrapper[4791]: I0218 01:00:46.704209 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Feb 18 01:00:47 crc kubenswrapper[4791]: I0218 01:00:47.083081 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="797178cc-4ac7-4338-88d7-b80656cd9566" path="/var/lib/kubelet/pods/797178cc-4ac7-4338-88d7-b80656cd9566/volumes" Feb 18 01:00:47 crc kubenswrapper[4791]: I0218 01:00:47.084738 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ccdd7e1f-2546-4f46-93cd-7e07d3db2182" path="/var/lib/kubelet/pods/ccdd7e1f-2546-4f46-93cd-7e07d3db2182/volumes" Feb 18 01:00:47 crc kubenswrapper[4791]: I0218 01:00:47.254723 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Feb 18 01:00:47 crc kubenswrapper[4791]: W0218 01:00:47.259635 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod23f7febf_286b_49f0_a4db_9aada2d4a4d7.slice/crio-3c050bf434fb863f0429fa41a9e4153996a4c905b705391bf2d60ff6c7c00342 WatchSource:0}: Error finding container 3c050bf434fb863f0429fa41a9e4153996a4c905b705391bf2d60ff6c7c00342: Status 404 returned error can't find the container with id 3c050bf434fb863f0429fa41a9e4153996a4c905b705391bf2d60ff6c7c00342 Feb 18 01:00:47 crc kubenswrapper[4791]: I0218 01:00:47.262334 4791 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 18 01:00:47 crc kubenswrapper[4791]: W0218 01:00:47.298508 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode0395872_356e_4d2f_ba73_bd4d2feed605.slice/crio-290c4396847c9e92d36c3e874c2a4743d5dd9aa8fc851020217f85dac9d3d5b4 WatchSource:0}: Error finding container 290c4396847c9e92d36c3e874c2a4743d5dd9aa8fc851020217f85dac9d3d5b4: Status 404 returned error can't find the container with id 290c4396847c9e92d36c3e874c2a4743d5dd9aa8fc851020217f85dac9d3d5b4 Feb 18 01:00:47 crc kubenswrapper[4791]: I0218 01:00:47.303111 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Feb 18 01:00:47 crc kubenswrapper[4791]: I0218 01:00:47.457625 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 18 01:00:47 crc kubenswrapper[4791]: I0218 01:00:47.458516 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1bac8251-a5b5-4029-b90a-275ee65ea0db" containerName="ceilometer-central-agent" containerID="cri-o://76e7f5ccba30445125a705538ce03d818ad2cbba026143d9957e2e2f84e501ae" gracePeriod=30 Feb 18 01:00:47 crc kubenswrapper[4791]: I0218 01:00:47.458597 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1bac8251-a5b5-4029-b90a-275ee65ea0db" containerName="ceilometer-notification-agent" containerID="cri-o://e03ed6a774d632465dcd6428aa14a659b86f77b37402ae80beb65411ddf80138" gracePeriod=30 Feb 18 01:00:47 crc kubenswrapper[4791]: I0218 01:00:47.458652 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1bac8251-a5b5-4029-b90a-275ee65ea0db" containerName="sg-core" containerID="cri-o://4e213a726ac093bb5f85732ee2304349ddb999b1839ceabfc8df605180016a9e" gracePeriod=30 Feb 18 01:00:47 crc kubenswrapper[4791]: I0218 01:00:47.458561 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1bac8251-a5b5-4029-b90a-275ee65ea0db" 
containerName="proxy-httpd" containerID="cri-o://15dec771ce35a432f634f8a002475ebd821455bb3a095712137fff8634855098" gracePeriod=30 Feb 18 01:00:47 crc kubenswrapper[4791]: E0218 01:00:47.875463 4791 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1bac8251_a5b5_4029_b90a_275ee65ea0db.slice/crio-conmon-76e7f5ccba30445125a705538ce03d818ad2cbba026143d9957e2e2f84e501ae.scope\": RecentStats: unable to find data in memory cache]" Feb 18 01:00:48 crc kubenswrapper[4791]: I0218 01:00:48.248937 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"e0395872-356e-4d2f-ba73-bd4d2feed605","Type":"ContainerStarted","Data":"4f0b195206afcd6b49547a676821700239e65185102c969820d69e99de9682f9"} Feb 18 01:00:48 crc kubenswrapper[4791]: I0218 01:00:48.248992 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"e0395872-356e-4d2f-ba73-bd4d2feed605","Type":"ContainerStarted","Data":"290c4396847c9e92d36c3e874c2a4743d5dd9aa8fc851020217f85dac9d3d5b4"} Feb 18 01:00:48 crc kubenswrapper[4791]: I0218 01:00:48.252429 4791 generic.go:334] "Generic (PLEG): container finished" podID="1bac8251-a5b5-4029-b90a-275ee65ea0db" containerID="15dec771ce35a432f634f8a002475ebd821455bb3a095712137fff8634855098" exitCode=0 Feb 18 01:00:48 crc kubenswrapper[4791]: I0218 01:00:48.252469 4791 generic.go:334] "Generic (PLEG): container finished" podID="1bac8251-a5b5-4029-b90a-275ee65ea0db" containerID="4e213a726ac093bb5f85732ee2304349ddb999b1839ceabfc8df605180016a9e" exitCode=2 Feb 18 01:00:48 crc kubenswrapper[4791]: I0218 01:00:48.252477 4791 generic.go:334] "Generic (PLEG): container finished" podID="1bac8251-a5b5-4029-b90a-275ee65ea0db" containerID="76e7f5ccba30445125a705538ce03d818ad2cbba026143d9957e2e2f84e501ae" exitCode=0 Feb 18 01:00:48 crc kubenswrapper[4791]: I0218 01:00:48.252501 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1bac8251-a5b5-4029-b90a-275ee65ea0db","Type":"ContainerDied","Data":"15dec771ce35a432f634f8a002475ebd821455bb3a095712137fff8634855098"} Feb 18 01:00:48 crc kubenswrapper[4791]: I0218 01:00:48.252548 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1bac8251-a5b5-4029-b90a-275ee65ea0db","Type":"ContainerDied","Data":"4e213a726ac093bb5f85732ee2304349ddb999b1839ceabfc8df605180016a9e"} Feb 18 01:00:48 crc kubenswrapper[4791]: I0218 01:00:48.252560 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1bac8251-a5b5-4029-b90a-275ee65ea0db","Type":"ContainerDied","Data":"76e7f5ccba30445125a705538ce03d818ad2cbba026143d9957e2e2f84e501ae"} Feb 18 01:00:48 crc kubenswrapper[4791]: I0218 01:00:48.253995 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"23f7febf-286b-49f0-a4db-9aada2d4a4d7","Type":"ContainerStarted","Data":"2534882b47833b2152a763a85467e4376704a3495ac816fee2103338d0c0d183"} Feb 18 01:00:48 crc kubenswrapper[4791]: I0218 01:00:48.254027 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"23f7febf-286b-49f0-a4db-9aada2d4a4d7","Type":"ContainerStarted","Data":"3c050bf434fb863f0429fa41a9e4153996a4c905b705391bf2d60ff6c7c00342"} Feb 18 01:00:48 crc kubenswrapper[4791]: I0218 01:00:48.254104 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openstack/kube-state-metrics-0" Feb 18 01:00:48 crc kubenswrapper[4791]: I0218 01:00:48.265782 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-0" podStartSLOduration=1.7571316590000001 podStartE2EDuration="2.26576159s" podCreationTimestamp="2026-02-18 01:00:46 +0000 UTC" firstStartedPulling="2026-02-18 01:00:47.301180377 +0000 UTC m=+1588.869193547" lastFinishedPulling="2026-02-18 01:00:47.809810308 +0000 UTC m=+1589.377823478" observedRunningTime="2026-02-18 01:00:48.263014775 +0000 UTC m=+1589.831027955" watchObservedRunningTime="2026-02-18 01:00:48.26576159 +0000 UTC m=+1589.833774760" Feb 18 01:00:48 crc kubenswrapper[4791]: I0218 01:00:48.304196 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=1.893709564 podStartE2EDuration="2.304177899s" podCreationTimestamp="2026-02-18 01:00:46 +0000 UTC" firstStartedPulling="2026-02-18 01:00:47.262042626 +0000 UTC m=+1588.830055796" lastFinishedPulling="2026-02-18 01:00:47.672510961 +0000 UTC m=+1589.240524131" observedRunningTime="2026-02-18 01:00:48.278376141 +0000 UTC m=+1589.846389311" watchObservedRunningTime="2026-02-18 01:00:48.304177899 +0000 UTC m=+1589.872191069" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.248032 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.286370 4791 generic.go:334] "Generic (PLEG): container finished" podID="1bac8251-a5b5-4029-b90a-275ee65ea0db" containerID="e03ed6a774d632465dcd6428aa14a659b86f77b37402ae80beb65411ddf80138" exitCode=0 Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.286412 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1bac8251-a5b5-4029-b90a-275ee65ea0db","Type":"ContainerDied","Data":"e03ed6a774d632465dcd6428aa14a659b86f77b37402ae80beb65411ddf80138"} Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.286452 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1bac8251-a5b5-4029-b90a-275ee65ea0db","Type":"ContainerDied","Data":"63589069182b73675c7ddfd2083b7271f644721e0eed9c747e2c835e33e85e87"} Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.286459 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.286472 4791 scope.go:117] "RemoveContainer" containerID="15dec771ce35a432f634f8a002475ebd821455bb3a095712137fff8634855098" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.326559 4791 scope.go:117] "RemoveContainer" containerID="4e213a726ac093bb5f85732ee2304349ddb999b1839ceabfc8df605180016a9e" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.347952 4791 scope.go:117] "RemoveContainer" containerID="e03ed6a774d632465dcd6428aa14a659b86f77b37402ae80beb65411ddf80138" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.360926 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1bac8251-a5b5-4029-b90a-275ee65ea0db-sg-core-conf-yaml\") pod \"1bac8251-a5b5-4029-b90a-275ee65ea0db\" (UID: \"1bac8251-a5b5-4029-b90a-275ee65ea0db\") " Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.361193 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-774t7\" (UniqueName: \"kubernetes.io/projected/1bac8251-a5b5-4029-b90a-275ee65ea0db-kube-api-access-774t7\") pod \"1bac8251-a5b5-4029-b90a-275ee65ea0db\" (UID: \"1bac8251-a5b5-4029-b90a-275ee65ea0db\") " Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.361326 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1bac8251-a5b5-4029-b90a-275ee65ea0db-config-data\") pod \"1bac8251-a5b5-4029-b90a-275ee65ea0db\" (UID: \"1bac8251-a5b5-4029-b90a-275ee65ea0db\") " Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.361360 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1bac8251-a5b5-4029-b90a-275ee65ea0db-log-httpd\") pod \"1bac8251-a5b5-4029-b90a-275ee65ea0db\" (UID: \"1bac8251-a5b5-4029-b90a-275ee65ea0db\") " Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.361406 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1bac8251-a5b5-4029-b90a-275ee65ea0db-combined-ca-bundle\") pod \"1bac8251-a5b5-4029-b90a-275ee65ea0db\" (UID: \"1bac8251-a5b5-4029-b90a-275ee65ea0db\") " Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.361477 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1bac8251-a5b5-4029-b90a-275ee65ea0db-scripts\") pod \"1bac8251-a5b5-4029-b90a-275ee65ea0db\" (UID: \"1bac8251-a5b5-4029-b90a-275ee65ea0db\") " Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.361515 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1bac8251-a5b5-4029-b90a-275ee65ea0db-run-httpd\") pod \"1bac8251-a5b5-4029-b90a-275ee65ea0db\" (UID: \"1bac8251-a5b5-4029-b90a-275ee65ea0db\") " Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.362895 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1bac8251-a5b5-4029-b90a-275ee65ea0db-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "1bac8251-a5b5-4029-b90a-275ee65ea0db" (UID: "1bac8251-a5b5-4029-b90a-275ee65ea0db"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.363709 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1bac8251-a5b5-4029-b90a-275ee65ea0db-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "1bac8251-a5b5-4029-b90a-275ee65ea0db" (UID: "1bac8251-a5b5-4029-b90a-275ee65ea0db"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.368036 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bac8251-a5b5-4029-b90a-275ee65ea0db-scripts" (OuterVolumeSpecName: "scripts") pod "1bac8251-a5b5-4029-b90a-275ee65ea0db" (UID: "1bac8251-a5b5-4029-b90a-275ee65ea0db"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.369296 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bac8251-a5b5-4029-b90a-275ee65ea0db-kube-api-access-774t7" (OuterVolumeSpecName: "kube-api-access-774t7") pod "1bac8251-a5b5-4029-b90a-275ee65ea0db" (UID: "1bac8251-a5b5-4029-b90a-275ee65ea0db"). InnerVolumeSpecName "kube-api-access-774t7". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.378986 4791 scope.go:117] "RemoveContainer" containerID="76e7f5ccba30445125a705538ce03d818ad2cbba026143d9957e2e2f84e501ae" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.397568 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bac8251-a5b5-4029-b90a-275ee65ea0db-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "1bac8251-a5b5-4029-b90a-275ee65ea0db" (UID: "1bac8251-a5b5-4029-b90a-275ee65ea0db"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.464687 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-774t7\" (UniqueName: \"kubernetes.io/projected/1bac8251-a5b5-4029-b90a-275ee65ea0db-kube-api-access-774t7\") on node \"crc\" DevicePath \"\"" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.464721 4791 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1bac8251-a5b5-4029-b90a-275ee65ea0db-log-httpd\") on node \"crc\" DevicePath \"\"" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.464730 4791 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1bac8251-a5b5-4029-b90a-275ee65ea0db-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.464738 4791 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1bac8251-a5b5-4029-b90a-275ee65ea0db-run-httpd\") on node \"crc\" DevicePath \"\"" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.464746 4791 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1bac8251-a5b5-4029-b90a-275ee65ea0db-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.469936 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bac8251-a5b5-4029-b90a-275ee65ea0db-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1bac8251-a5b5-4029-b90a-275ee65ea0db" (UID: "1bac8251-a5b5-4029-b90a-275ee65ea0db"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.502866 4791 scope.go:117] "RemoveContainer" containerID="15dec771ce35a432f634f8a002475ebd821455bb3a095712137fff8634855098" Feb 18 01:00:50 crc kubenswrapper[4791]: E0218 01:00:50.505815 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"15dec771ce35a432f634f8a002475ebd821455bb3a095712137fff8634855098\": container with ID starting with 15dec771ce35a432f634f8a002475ebd821455bb3a095712137fff8634855098 not found: ID does not exist" containerID="15dec771ce35a432f634f8a002475ebd821455bb3a095712137fff8634855098" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.505979 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"15dec771ce35a432f634f8a002475ebd821455bb3a095712137fff8634855098"} err="failed to get container status \"15dec771ce35a432f634f8a002475ebd821455bb3a095712137fff8634855098\": rpc error: code = NotFound desc = could not find container \"15dec771ce35a432f634f8a002475ebd821455bb3a095712137fff8634855098\": container with ID starting with 15dec771ce35a432f634f8a002475ebd821455bb3a095712137fff8634855098 not found: ID does not exist" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.506094 4791 scope.go:117] "RemoveContainer" containerID="4e213a726ac093bb5f85732ee2304349ddb999b1839ceabfc8df605180016a9e" Feb 18 01:00:50 crc kubenswrapper[4791]: E0218 01:00:50.507272 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4e213a726ac093bb5f85732ee2304349ddb999b1839ceabfc8df605180016a9e\": container with ID starting with 
4e213a726ac093bb5f85732ee2304349ddb999b1839ceabfc8df605180016a9e not found: ID does not exist" containerID="4e213a726ac093bb5f85732ee2304349ddb999b1839ceabfc8df605180016a9e" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.507325 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e213a726ac093bb5f85732ee2304349ddb999b1839ceabfc8df605180016a9e"} err="failed to get container status \"4e213a726ac093bb5f85732ee2304349ddb999b1839ceabfc8df605180016a9e\": rpc error: code = NotFound desc = could not find container \"4e213a726ac093bb5f85732ee2304349ddb999b1839ceabfc8df605180016a9e\": container with ID starting with 4e213a726ac093bb5f85732ee2304349ddb999b1839ceabfc8df605180016a9e not found: ID does not exist" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.507359 4791 scope.go:117] "RemoveContainer" containerID="e03ed6a774d632465dcd6428aa14a659b86f77b37402ae80beb65411ddf80138" Feb 18 01:00:50 crc kubenswrapper[4791]: E0218 01:00:50.508003 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e03ed6a774d632465dcd6428aa14a659b86f77b37402ae80beb65411ddf80138\": container with ID starting with e03ed6a774d632465dcd6428aa14a659b86f77b37402ae80beb65411ddf80138 not found: ID does not exist" containerID="e03ed6a774d632465dcd6428aa14a659b86f77b37402ae80beb65411ddf80138" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.508020 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e03ed6a774d632465dcd6428aa14a659b86f77b37402ae80beb65411ddf80138"} err="failed to get container status \"e03ed6a774d632465dcd6428aa14a659b86f77b37402ae80beb65411ddf80138\": rpc error: code = NotFound desc = could not find container \"e03ed6a774d632465dcd6428aa14a659b86f77b37402ae80beb65411ddf80138\": container with ID starting with e03ed6a774d632465dcd6428aa14a659b86f77b37402ae80beb65411ddf80138 not found: ID does not exist" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.508032 4791 scope.go:117] "RemoveContainer" containerID="76e7f5ccba30445125a705538ce03d818ad2cbba026143d9957e2e2f84e501ae" Feb 18 01:00:50 crc kubenswrapper[4791]: E0218 01:00:50.510040 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"76e7f5ccba30445125a705538ce03d818ad2cbba026143d9957e2e2f84e501ae\": container with ID starting with 76e7f5ccba30445125a705538ce03d818ad2cbba026143d9957e2e2f84e501ae not found: ID does not exist" containerID="76e7f5ccba30445125a705538ce03d818ad2cbba026143d9957e2e2f84e501ae" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.510064 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"76e7f5ccba30445125a705538ce03d818ad2cbba026143d9957e2e2f84e501ae"} err="failed to get container status \"76e7f5ccba30445125a705538ce03d818ad2cbba026143d9957e2e2f84e501ae\": rpc error: code = NotFound desc = could not find container \"76e7f5ccba30445125a705538ce03d818ad2cbba026143d9957e2e2f84e501ae\": container with ID starting with 76e7f5ccba30445125a705538ce03d818ad2cbba026143d9957e2e2f84e501ae not found: ID does not exist" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.512671 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bac8251-a5b5-4029-b90a-275ee65ea0db-config-data" (OuterVolumeSpecName: "config-data") pod "1bac8251-a5b5-4029-b90a-275ee65ea0db" (UID: 
"1bac8251-a5b5-4029-b90a-275ee65ea0db"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.566895 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1bac8251-a5b5-4029-b90a-275ee65ea0db-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.566940 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1bac8251-a5b5-4029-b90a-275ee65ea0db-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.623118 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.635624 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.675464 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 18 01:00:50 crc kubenswrapper[4791]: E0218 01:00:50.676571 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1bac8251-a5b5-4029-b90a-275ee65ea0db" containerName="proxy-httpd" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.676591 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="1bac8251-a5b5-4029-b90a-275ee65ea0db" containerName="proxy-httpd" Feb 18 01:00:50 crc kubenswrapper[4791]: E0218 01:00:50.676609 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1bac8251-a5b5-4029-b90a-275ee65ea0db" containerName="ceilometer-central-agent" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.676616 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="1bac8251-a5b5-4029-b90a-275ee65ea0db" containerName="ceilometer-central-agent" Feb 18 01:00:50 crc kubenswrapper[4791]: E0218 01:00:50.676640 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1bac8251-a5b5-4029-b90a-275ee65ea0db" containerName="sg-core" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.676647 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="1bac8251-a5b5-4029-b90a-275ee65ea0db" containerName="sg-core" Feb 18 01:00:50 crc kubenswrapper[4791]: E0218 01:00:50.676676 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1bac8251-a5b5-4029-b90a-275ee65ea0db" containerName="ceilometer-notification-agent" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.676683 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="1bac8251-a5b5-4029-b90a-275ee65ea0db" containerName="ceilometer-notification-agent" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.677526 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="1bac8251-a5b5-4029-b90a-275ee65ea0db" containerName="sg-core" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.677562 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="1bac8251-a5b5-4029-b90a-275ee65ea0db" containerName="proxy-httpd" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.677590 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="1bac8251-a5b5-4029-b90a-275ee65ea0db" containerName="ceilometer-central-agent" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.677612 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="1bac8251-a5b5-4029-b90a-275ee65ea0db" containerName="ceilometer-notification-agent" Feb 18 01:00:50 crc 
kubenswrapper[4791]: I0218 01:00:50.683892 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.690135 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.690530 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.691336 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.713709 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.879992 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82b5d66f-f75d-41d1-be23-9e6b60446ad5-run-httpd\") pod \"ceilometer-0\" (UID: \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\") " pod="openstack/ceilometer-0" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.880265 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82b5d66f-f75d-41d1-be23-9e6b60446ad5-config-data\") pod \"ceilometer-0\" (UID: \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\") " pod="openstack/ceilometer-0" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.880416 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/82b5d66f-f75d-41d1-be23-9e6b60446ad5-scripts\") pod \"ceilometer-0\" (UID: \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\") " pod="openstack/ceilometer-0" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.880674 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82b5d66f-f75d-41d1-be23-9e6b60446ad5-log-httpd\") pod \"ceilometer-0\" (UID: \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\") " pod="openstack/ceilometer-0" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.880760 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z4zt9\" (UniqueName: \"kubernetes.io/projected/82b5d66f-f75d-41d1-be23-9e6b60446ad5-kube-api-access-z4zt9\") pod \"ceilometer-0\" (UID: \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\") " pod="openstack/ceilometer-0" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.880801 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/82b5d66f-f75d-41d1-be23-9e6b60446ad5-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\") " pod="openstack/ceilometer-0" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.881062 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82b5d66f-f75d-41d1-be23-9e6b60446ad5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\") " pod="openstack/ceilometer-0" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.881170 4791 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/82b5d66f-f75d-41d1-be23-9e6b60446ad5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\") " pod="openstack/ceilometer-0" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.983435 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82b5d66f-f75d-41d1-be23-9e6b60446ad5-run-httpd\") pod \"ceilometer-0\" (UID: \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\") " pod="openstack/ceilometer-0" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.983816 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82b5d66f-f75d-41d1-be23-9e6b60446ad5-config-data\") pod \"ceilometer-0\" (UID: \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\") " pod="openstack/ceilometer-0" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.983855 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/82b5d66f-f75d-41d1-be23-9e6b60446ad5-scripts\") pod \"ceilometer-0\" (UID: \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\") " pod="openstack/ceilometer-0" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.983926 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82b5d66f-f75d-41d1-be23-9e6b60446ad5-log-httpd\") pod \"ceilometer-0\" (UID: \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\") " pod="openstack/ceilometer-0" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.983956 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82b5d66f-f75d-41d1-be23-9e6b60446ad5-run-httpd\") pod \"ceilometer-0\" (UID: \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\") " pod="openstack/ceilometer-0" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.983962 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z4zt9\" (UniqueName: \"kubernetes.io/projected/82b5d66f-f75d-41d1-be23-9e6b60446ad5-kube-api-access-z4zt9\") pod \"ceilometer-0\" (UID: \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\") " pod="openstack/ceilometer-0" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.984010 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/82b5d66f-f75d-41d1-be23-9e6b60446ad5-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\") " pod="openstack/ceilometer-0" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.984134 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82b5d66f-f75d-41d1-be23-9e6b60446ad5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\") " pod="openstack/ceilometer-0" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.984197 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/82b5d66f-f75d-41d1-be23-9e6b60446ad5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\") " pod="openstack/ceilometer-0" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.984508 4791 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82b5d66f-f75d-41d1-be23-9e6b60446ad5-log-httpd\") pod \"ceilometer-0\" (UID: \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\") " pod="openstack/ceilometer-0" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.988071 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/82b5d66f-f75d-41d1-be23-9e6b60446ad5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\") " pod="openstack/ceilometer-0" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.988799 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/82b5d66f-f75d-41d1-be23-9e6b60446ad5-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\") " pod="openstack/ceilometer-0" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.988889 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82b5d66f-f75d-41d1-be23-9e6b60446ad5-config-data\") pod \"ceilometer-0\" (UID: \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\") " pod="openstack/ceilometer-0" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.989100 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82b5d66f-f75d-41d1-be23-9e6b60446ad5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\") " pod="openstack/ceilometer-0" Feb 18 01:00:50 crc kubenswrapper[4791]: I0218 01:00:50.990815 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/82b5d66f-f75d-41d1-be23-9e6b60446ad5-scripts\") pod \"ceilometer-0\" (UID: \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\") " pod="openstack/ceilometer-0" Feb 18 01:00:51 crc kubenswrapper[4791]: I0218 01:00:51.008331 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z4zt9\" (UniqueName: \"kubernetes.io/projected/82b5d66f-f75d-41d1-be23-9e6b60446ad5-kube-api-access-z4zt9\") pod \"ceilometer-0\" (UID: \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\") " pod="openstack/ceilometer-0" Feb 18 01:00:51 crc kubenswrapper[4791]: I0218 01:00:51.020675 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 18 01:00:51 crc kubenswrapper[4791]: I0218 01:00:51.079006 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bac8251-a5b5-4029-b90a-275ee65ea0db" path="/var/lib/kubelet/pods/1bac8251-a5b5-4029-b90a-275ee65ea0db/volumes" Feb 18 01:00:51 crc kubenswrapper[4791]: I0218 01:00:51.522026 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 18 01:00:52 crc kubenswrapper[4791]: I0218 01:00:52.273204 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-sync-g5ccz"] Feb 18 01:00:52 crc kubenswrapper[4791]: I0218 01:00:52.289300 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-g5ccz"] Feb 18 01:00:52 crc kubenswrapper[4791]: I0218 01:00:52.376402 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-scphk"] Feb 18 01:00:52 crc kubenswrapper[4791]: I0218 01:00:52.378111 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-scphk" Feb 18 01:00:52 crc kubenswrapper[4791]: I0218 01:00:52.398988 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-scphk"] Feb 18 01:00:52 crc kubenswrapper[4791]: I0218 01:00:52.412388 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"82b5d66f-f75d-41d1-be23-9e6b60446ad5","Type":"ContainerStarted","Data":"f872ba81222a223199ae6dfc7eb01c197de1a86e4261e7c28eb0e6276d3076fb"} Feb 18 01:00:52 crc kubenswrapper[4791]: I0218 01:00:52.436852 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lgn7d\" (UniqueName: \"kubernetes.io/projected/68e5e8d6-5771-4045-858a-4a39b2db99f9-kube-api-access-lgn7d\") pod \"heat-db-sync-scphk\" (UID: \"68e5e8d6-5771-4045-858a-4a39b2db99f9\") " pod="openstack/heat-db-sync-scphk" Feb 18 01:00:52 crc kubenswrapper[4791]: I0218 01:00:52.436980 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/68e5e8d6-5771-4045-858a-4a39b2db99f9-config-data\") pod \"heat-db-sync-scphk\" (UID: \"68e5e8d6-5771-4045-858a-4a39b2db99f9\") " pod="openstack/heat-db-sync-scphk" Feb 18 01:00:52 crc kubenswrapper[4791]: I0218 01:00:52.437011 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68e5e8d6-5771-4045-858a-4a39b2db99f9-combined-ca-bundle\") pod \"heat-db-sync-scphk\" (UID: \"68e5e8d6-5771-4045-858a-4a39b2db99f9\") " pod="openstack/heat-db-sync-scphk" Feb 18 01:00:52 crc kubenswrapper[4791]: I0218 01:00:52.540226 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/68e5e8d6-5771-4045-858a-4a39b2db99f9-config-data\") pod \"heat-db-sync-scphk\" (UID: \"68e5e8d6-5771-4045-858a-4a39b2db99f9\") " pod="openstack/heat-db-sync-scphk" Feb 18 01:00:52 crc kubenswrapper[4791]: I0218 01:00:52.540308 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68e5e8d6-5771-4045-858a-4a39b2db99f9-combined-ca-bundle\") pod \"heat-db-sync-scphk\" (UID: \"68e5e8d6-5771-4045-858a-4a39b2db99f9\") " pod="openstack/heat-db-sync-scphk" Feb 18 01:00:52 crc kubenswrapper[4791]: I0218 01:00:52.540459 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lgn7d\" (UniqueName: \"kubernetes.io/projected/68e5e8d6-5771-4045-858a-4a39b2db99f9-kube-api-access-lgn7d\") pod \"heat-db-sync-scphk\" (UID: \"68e5e8d6-5771-4045-858a-4a39b2db99f9\") " pod="openstack/heat-db-sync-scphk" Feb 18 01:00:52 crc kubenswrapper[4791]: I0218 01:00:52.546379 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/68e5e8d6-5771-4045-858a-4a39b2db99f9-config-data\") pod \"heat-db-sync-scphk\" (UID: \"68e5e8d6-5771-4045-858a-4a39b2db99f9\") " pod="openstack/heat-db-sync-scphk" Feb 18 01:00:52 crc kubenswrapper[4791]: I0218 01:00:52.548017 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68e5e8d6-5771-4045-858a-4a39b2db99f9-combined-ca-bundle\") pod \"heat-db-sync-scphk\" (UID: \"68e5e8d6-5771-4045-858a-4a39b2db99f9\") " pod="openstack/heat-db-sync-scphk" Feb 18 01:00:52 crc 
kubenswrapper[4791]: I0218 01:00:52.555637 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lgn7d\" (UniqueName: \"kubernetes.io/projected/68e5e8d6-5771-4045-858a-4a39b2db99f9-kube-api-access-lgn7d\") pod \"heat-db-sync-scphk\" (UID: \"68e5e8d6-5771-4045-858a-4a39b2db99f9\") " pod="openstack/heat-db-sync-scphk" Feb 18 01:00:52 crc kubenswrapper[4791]: I0218 01:00:52.759817 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-scphk" Feb 18 01:00:53 crc kubenswrapper[4791]: I0218 01:00:53.078376 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4f39e5ec-0b51-4c0e-9a95-95c3e69163b8" path="/var/lib/kubelet/pods/4f39e5ec-0b51-4c0e-9a95-95c3e69163b8/volumes" Feb 18 01:00:53 crc kubenswrapper[4791]: W0218 01:00:53.246053 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod68e5e8d6_5771_4045_858a_4a39b2db99f9.slice/crio-9bd1713363649f658d0a0731bf8df6b98ade5ade17486583289e443b0a42d16b WatchSource:0}: Error finding container 9bd1713363649f658d0a0731bf8df6b98ade5ade17486583289e443b0a42d16b: Status 404 returned error can't find the container with id 9bd1713363649f658d0a0731bf8df6b98ade5ade17486583289e443b0a42d16b Feb 18 01:00:53 crc kubenswrapper[4791]: I0218 01:00:53.246911 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-scphk"] Feb 18 01:00:53 crc kubenswrapper[4791]: E0218 01:00:53.378305 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 01:00:53 crc kubenswrapper[4791]: E0218 01:00:53.378365 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 01:00:53 crc kubenswrapper[4791]: E0218 01:00:53.378480 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lgn7d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-scphk_openstack(68e5e8d6-5771-4045-858a-4a39b2db99f9): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 18 01:00:53 crc kubenswrapper[4791]: E0218 01:00:53.379801 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:00:53 crc kubenswrapper[4791]: I0218 01:00:53.425191 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-scphk" event={"ID":"68e5e8d6-5771-4045-858a-4a39b2db99f9","Type":"ContainerStarted","Data":"9bd1713363649f658d0a0731bf8df6b98ade5ade17486583289e443b0a42d16b"} Feb 18 01:00:53 crc kubenswrapper[4791]: E0218 01:00:53.427210 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:00:53 crc kubenswrapper[4791]: I0218 01:00:53.427535 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"82b5d66f-f75d-41d1-be23-9e6b60446ad5","Type":"ContainerStarted","Data":"645d82e19cf67227a6cc3840d9814a8717abac72619fae8e06c113395b996af2"} Feb 18 01:00:54 crc kubenswrapper[4791]: I0218 01:00:54.440713 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"82b5d66f-f75d-41d1-be23-9e6b60446ad5","Type":"ContainerStarted","Data":"99f5b0bcd78611195226e82a4917bf1ec49ac3467917762b14aeb02397478504"} Feb 18 01:00:54 crc kubenswrapper[4791]: I0218 01:00:54.441367 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"82b5d66f-f75d-41d1-be23-9e6b60446ad5","Type":"ContainerStarted","Data":"dd8f0cb3bd8ebeac41acf4143937ae5931d38504296c21e1781a99e3a7c10d04"} Feb 18 01:00:54 crc kubenswrapper[4791]: E0218 01:00:54.442503 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:00:54 crc kubenswrapper[4791]: I0218 01:00:54.930308 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-2"] Feb 18 01:00:56 crc kubenswrapper[4791]: I0218 01:00:56.058148 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Feb 18 01:00:56 crc kubenswrapper[4791]: I0218 01:00:56.723538 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Feb 18 01:00:57 crc kubenswrapper[4791]: I0218 01:00:57.656246 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 18 01:00:59 crc kubenswrapper[4791]: I0218 01:00:59.708199 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-2" podUID="5b450ecd-05e1-453a-a4d5-953802f0a1cf" containerName="rabbitmq" containerID="cri-o://5bd1c5f46865028441e3b970c5220006bbd97f4d84c9ca8ecd4b5cbfd164dee5" gracePeriod=604796 Feb 18 01:01:00 crc kubenswrapper[4791]: I0218 01:01:00.151482 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29522941-j7h5m"] Feb 18 01:01:00 crc kubenswrapper[4791]: I0218 01:01:00.153318 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29522941-j7h5m" Feb 18 01:01:00 crc kubenswrapper[4791]: I0218 01:01:00.171506 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29522941-j7h5m"] Feb 18 01:01:00 crc kubenswrapper[4791]: I0218 01:01:00.320533 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7335be95-38b6-4ee3-9e54-ec94854cda08-fernet-keys\") pod \"keystone-cron-29522941-j7h5m\" (UID: \"7335be95-38b6-4ee3-9e54-ec94854cda08\") " pod="openstack/keystone-cron-29522941-j7h5m" Feb 18 01:01:00 crc kubenswrapper[4791]: I0218 01:01:00.320584 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7335be95-38b6-4ee3-9e54-ec94854cda08-config-data\") pod \"keystone-cron-29522941-j7h5m\" (UID: \"7335be95-38b6-4ee3-9e54-ec94854cda08\") " pod="openstack/keystone-cron-29522941-j7h5m" Feb 18 01:01:00 crc kubenswrapper[4791]: I0218 01:01:00.320736 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7335be95-38b6-4ee3-9e54-ec94854cda08-combined-ca-bundle\") pod \"keystone-cron-29522941-j7h5m\" (UID: \"7335be95-38b6-4ee3-9e54-ec94854cda08\") " pod="openstack/keystone-cron-29522941-j7h5m" Feb 18 01:01:00 crc kubenswrapper[4791]: I0218 01:01:00.321383 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5xxfn\" (UniqueName: \"kubernetes.io/projected/7335be95-38b6-4ee3-9e54-ec94854cda08-kube-api-access-5xxfn\") pod \"keystone-cron-29522941-j7h5m\" (UID: \"7335be95-38b6-4ee3-9e54-ec94854cda08\") " pod="openstack/keystone-cron-29522941-j7h5m" Feb 18 01:01:00 crc kubenswrapper[4791]: I0218 01:01:00.427947 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5xxfn\" (UniqueName: \"kubernetes.io/projected/7335be95-38b6-4ee3-9e54-ec94854cda08-kube-api-access-5xxfn\") pod \"keystone-cron-29522941-j7h5m\" (UID: \"7335be95-38b6-4ee3-9e54-ec94854cda08\") " pod="openstack/keystone-cron-29522941-j7h5m" Feb 18 01:01:00 crc kubenswrapper[4791]: I0218 01:01:00.428020 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7335be95-38b6-4ee3-9e54-ec94854cda08-fernet-keys\") pod \"keystone-cron-29522941-j7h5m\" (UID: \"7335be95-38b6-4ee3-9e54-ec94854cda08\") " pod="openstack/keystone-cron-29522941-j7h5m" Feb 18 01:01:00 crc kubenswrapper[4791]: I0218 01:01:00.428048 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7335be95-38b6-4ee3-9e54-ec94854cda08-config-data\") pod \"keystone-cron-29522941-j7h5m\" (UID: \"7335be95-38b6-4ee3-9e54-ec94854cda08\") " pod="openstack/keystone-cron-29522941-j7h5m" Feb 18 01:01:00 crc kubenswrapper[4791]: I0218 01:01:00.428103 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7335be95-38b6-4ee3-9e54-ec94854cda08-combined-ca-bundle\") pod \"keystone-cron-29522941-j7h5m\" (UID: \"7335be95-38b6-4ee3-9e54-ec94854cda08\") " pod="openstack/keystone-cron-29522941-j7h5m" Feb 18 01:01:00 crc kubenswrapper[4791]: I0218 01:01:00.435136 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7335be95-38b6-4ee3-9e54-ec94854cda08-combined-ca-bundle\") pod \"keystone-cron-29522941-j7h5m\" (UID: \"7335be95-38b6-4ee3-9e54-ec94854cda08\") " pod="openstack/keystone-cron-29522941-j7h5m" Feb 18 01:01:00 crc kubenswrapper[4791]: I0218 01:01:00.438285 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7335be95-38b6-4ee3-9e54-ec94854cda08-fernet-keys\") pod \"keystone-cron-29522941-j7h5m\" (UID: \"7335be95-38b6-4ee3-9e54-ec94854cda08\") " pod="openstack/keystone-cron-29522941-j7h5m" Feb 18 01:01:00 crc kubenswrapper[4791]: I0218 01:01:00.459041 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7335be95-38b6-4ee3-9e54-ec94854cda08-config-data\") pod \"keystone-cron-29522941-j7h5m\" (UID: \"7335be95-38b6-4ee3-9e54-ec94854cda08\") " pod="openstack/keystone-cron-29522941-j7h5m" Feb 18 01:01:00 crc kubenswrapper[4791]: I0218 01:01:00.509007 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5xxfn\" (UniqueName: \"kubernetes.io/projected/7335be95-38b6-4ee3-9e54-ec94854cda08-kube-api-access-5xxfn\") pod \"keystone-cron-29522941-j7h5m\" (UID: \"7335be95-38b6-4ee3-9e54-ec94854cda08\") " pod="openstack/keystone-cron-29522941-j7h5m" Feb 18 01:01:00 crc kubenswrapper[4791]: I0218 01:01:00.539882 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29522941-j7h5m" Feb 18 01:01:01 crc kubenswrapper[4791]: I0218 01:01:01.115906 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29522941-j7h5m"] Feb 18 01:01:01 crc kubenswrapper[4791]: I0218 01:01:01.492081 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="4bb5fcf5-6cd5-4569-b788-5740edee3793" containerName="rabbitmq" containerID="cri-o://a3a58d7367bdc2390499ae1197865472c365818400bec1809ff6593cd43c9604" gracePeriod=604795 Feb 18 01:01:01 crc kubenswrapper[4791]: I0218 01:01:01.523178 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29522941-j7h5m" event={"ID":"7335be95-38b6-4ee3-9e54-ec94854cda08","Type":"ContainerStarted","Data":"48cbc448c02242a735468ae9b9dc1f319131e791c3a338ab126eb538f522b84a"} Feb 18 01:01:01 crc kubenswrapper[4791]: I0218 01:01:01.523225 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29522941-j7h5m" event={"ID":"7335be95-38b6-4ee3-9e54-ec94854cda08","Type":"ContainerStarted","Data":"b44f82151bf298ada135bcf4d600236859725f839b1425f8ca24c7f97a0ee85a"} Feb 18 01:01:01 crc kubenswrapper[4791]: I0218 01:01:01.539487 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29522941-j7h5m" podStartSLOduration=1.539469785 podStartE2EDuration="1.539469785s" podCreationTimestamp="2026-02-18 01:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 01:01:01.538456173 +0000 UTC m=+1603.106469343" watchObservedRunningTime="2026-02-18 01:01:01.539469785 +0000 UTC m=+1603.107482955" Feb 18 01:01:02 crc kubenswrapper[4791]: E0218 01:01:02.816341 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://registry.redhat.io/redhat/redhat-marketplace-index:v4.18: reading 
manifest v4.18 in registry.redhat.io/redhat/redhat-marketplace-index: received unexpected HTTP status: 504 Gateway Time-out" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Feb 18 01:01:02 crc kubenswrapper[4791]: E0218 01:01:02.816505 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-772gx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-c82zr_openshift-marketplace(348a7b7d-959f-4e3e-b40f-d39facc48df0): ErrImagePull: initializing source docker://registry.redhat.io/redhat/redhat-marketplace-index:v4.18: reading manifest v4.18 in registry.redhat.io/redhat/redhat-marketplace-index: received unexpected HTTP status: 504 Gateway Time-out" logger="UnhandledError" Feb 18 01:01:02 crc kubenswrapper[4791]: E0218 01:01:02.817861 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"initializing source docker://registry.redhat.io/redhat/redhat-marketplace-index:v4.18: reading manifest v4.18 in registry.redhat.io/redhat/redhat-marketplace-index: received unexpected HTTP status: 504 Gateway Time-out\"" pod="openshift-marketplace/redhat-marketplace-c82zr" podUID="348a7b7d-959f-4e3e-b40f-d39facc48df0" Feb 18 01:01:03 crc kubenswrapper[4791]: E0218 01:01:03.554668 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-c82zr" podUID="348a7b7d-959f-4e3e-b40f-d39facc48df0" Feb 18 01:01:04 crc kubenswrapper[4791]: I0218 01:01:04.568629 4791 generic.go:334] "Generic (PLEG): container finished" podID="7335be95-38b6-4ee3-9e54-ec94854cda08" containerID="48cbc448c02242a735468ae9b9dc1f319131e791c3a338ab126eb538f522b84a" exitCode=0 Feb 18 01:01:04 crc kubenswrapper[4791]: I0218 01:01:04.569016 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29522941-j7h5m" 
event={"ID":"7335be95-38b6-4ee3-9e54-ec94854cda08","Type":"ContainerDied","Data":"48cbc448c02242a735468ae9b9dc1f319131e791c3a338ab126eb538f522b84a"} Feb 18 01:01:05 crc kubenswrapper[4791]: I0218 01:01:05.085601 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-2" podUID="5b450ecd-05e1-453a-a4d5-953802f0a1cf" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.130:5671: connect: connection refused" Feb 18 01:01:05 crc kubenswrapper[4791]: I0218 01:01:05.221607 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="4bb5fcf5-6cd5-4569-b788-5740edee3793" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.132:5671: connect: connection refused" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.101448 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29522941-j7h5m" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.139372 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5xxfn\" (UniqueName: \"kubernetes.io/projected/7335be95-38b6-4ee3-9e54-ec94854cda08-kube-api-access-5xxfn\") pod \"7335be95-38b6-4ee3-9e54-ec94854cda08\" (UID: \"7335be95-38b6-4ee3-9e54-ec94854cda08\") " Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.139422 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7335be95-38b6-4ee3-9e54-ec94854cda08-fernet-keys\") pod \"7335be95-38b6-4ee3-9e54-ec94854cda08\" (UID: \"7335be95-38b6-4ee3-9e54-ec94854cda08\") " Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.139524 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7335be95-38b6-4ee3-9e54-ec94854cda08-config-data\") pod \"7335be95-38b6-4ee3-9e54-ec94854cda08\" (UID: \"7335be95-38b6-4ee3-9e54-ec94854cda08\") " Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.139635 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7335be95-38b6-4ee3-9e54-ec94854cda08-combined-ca-bundle\") pod \"7335be95-38b6-4ee3-9e54-ec94854cda08\" (UID: \"7335be95-38b6-4ee3-9e54-ec94854cda08\") " Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.144959 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7335be95-38b6-4ee3-9e54-ec94854cda08-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "7335be95-38b6-4ee3-9e54-ec94854cda08" (UID: "7335be95-38b6-4ee3-9e54-ec94854cda08"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.145514 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7335be95-38b6-4ee3-9e54-ec94854cda08-kube-api-access-5xxfn" (OuterVolumeSpecName: "kube-api-access-5xxfn") pod "7335be95-38b6-4ee3-9e54-ec94854cda08" (UID: "7335be95-38b6-4ee3-9e54-ec94854cda08"). InnerVolumeSpecName "kube-api-access-5xxfn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.226553 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7335be95-38b6-4ee3-9e54-ec94854cda08-config-data" (OuterVolumeSpecName: "config-data") pod "7335be95-38b6-4ee3-9e54-ec94854cda08" (UID: "7335be95-38b6-4ee3-9e54-ec94854cda08"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.240004 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7335be95-38b6-4ee3-9e54-ec94854cda08-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7335be95-38b6-4ee3-9e54-ec94854cda08" (UID: "7335be95-38b6-4ee3-9e54-ec94854cda08"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.243422 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7335be95-38b6-4ee3-9e54-ec94854cda08-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.243470 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7335be95-38b6-4ee3-9e54-ec94854cda08-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.243485 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5xxfn\" (UniqueName: \"kubernetes.io/projected/7335be95-38b6-4ee3-9e54-ec94854cda08-kube-api-access-5xxfn\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.243499 4791 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7335be95-38b6-4ee3-9e54-ec94854cda08-fernet-keys\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.375966 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-2" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.446513 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5b450ecd-05e1-453a-a4d5-953802f0a1cf-rabbitmq-plugins\") pod \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.446713 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5b450ecd-05e1-453a-a4d5-953802f0a1cf-pod-info\") pod \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.446779 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5b450ecd-05e1-453a-a4d5-953802f0a1cf-plugins-conf\") pod \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.446808 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5b450ecd-05e1-453a-a4d5-953802f0a1cf-rabbitmq-tls\") pod \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.446946 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5b450ecd-05e1-453a-a4d5-953802f0a1cf-rabbitmq-confd\") pod \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.446986 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jp56k\" (UniqueName: \"kubernetes.io/projected/5b450ecd-05e1-453a-a4d5-953802f0a1cf-kube-api-access-jp56k\") pod \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.447043 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5b450ecd-05e1-453a-a4d5-953802f0a1cf-config-data\") pod \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.447075 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5b450ecd-05e1-453a-a4d5-953802f0a1cf-server-conf\") pod \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.447127 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5b450ecd-05e1-453a-a4d5-953802f0a1cf-rabbitmq-erlang-cookie\") pod \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.447750 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-db75c799-c8b4-454a-9a7a-9308243c50db\") pod 
\"5b450ecd-05e1-453a-a4d5-953802f0a1cf\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.447830 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5b450ecd-05e1-453a-a4d5-953802f0a1cf-erlang-cookie-secret\") pod \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\" (UID: \"5b450ecd-05e1-453a-a4d5-953802f0a1cf\") " Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.449825 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b450ecd-05e1-453a-a4d5-953802f0a1cf-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "5b450ecd-05e1-453a-a4d5-953802f0a1cf" (UID: "5b450ecd-05e1-453a-a4d5-953802f0a1cf"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.455291 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b450ecd-05e1-453a-a4d5-953802f0a1cf-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "5b450ecd-05e1-453a-a4d5-953802f0a1cf" (UID: "5b450ecd-05e1-453a-a4d5-953802f0a1cf"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.457770 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b450ecd-05e1-453a-a4d5-953802f0a1cf-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "5b450ecd-05e1-453a-a4d5-953802f0a1cf" (UID: "5b450ecd-05e1-453a-a4d5-953802f0a1cf"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.463818 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b450ecd-05e1-453a-a4d5-953802f0a1cf-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "5b450ecd-05e1-453a-a4d5-953802f0a1cf" (UID: "5b450ecd-05e1-453a-a4d5-953802f0a1cf"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.464380 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b450ecd-05e1-453a-a4d5-953802f0a1cf-kube-api-access-jp56k" (OuterVolumeSpecName: "kube-api-access-jp56k") pod "5b450ecd-05e1-453a-a4d5-953802f0a1cf" (UID: "5b450ecd-05e1-453a-a4d5-953802f0a1cf"). InnerVolumeSpecName "kube-api-access-jp56k". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.480322 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/5b450ecd-05e1-453a-a4d5-953802f0a1cf-pod-info" (OuterVolumeSpecName: "pod-info") pod "5b450ecd-05e1-453a-a4d5-953802f0a1cf" (UID: "5b450ecd-05e1-453a-a4d5-953802f0a1cf"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.480626 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b450ecd-05e1-453a-a4d5-953802f0a1cf-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "5b450ecd-05e1-453a-a4d5-953802f0a1cf" (UID: "5b450ecd-05e1-453a-a4d5-953802f0a1cf"). InnerVolumeSpecName "erlang-cookie-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.504205 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-db75c799-c8b4-454a-9a7a-9308243c50db" (OuterVolumeSpecName: "persistence") pod "5b450ecd-05e1-453a-a4d5-953802f0a1cf" (UID: "5b450ecd-05e1-453a-a4d5-953802f0a1cf"). InnerVolumeSpecName "pvc-db75c799-c8b4-454a-9a7a-9308243c50db". PluginName "kubernetes.io/csi", VolumeGidValue "" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.521258 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b450ecd-05e1-453a-a4d5-953802f0a1cf-config-data" (OuterVolumeSpecName: "config-data") pod "5b450ecd-05e1-453a-a4d5-953802f0a1cf" (UID: "5b450ecd-05e1-453a-a4d5-953802f0a1cf"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.530818 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b450ecd-05e1-453a-a4d5-953802f0a1cf-server-conf" (OuterVolumeSpecName: "server-conf") pod "5b450ecd-05e1-453a-a4d5-953802f0a1cf" (UID: "5b450ecd-05e1-453a-a4d5-953802f0a1cf"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.552200 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jp56k\" (UniqueName: \"kubernetes.io/projected/5b450ecd-05e1-453a-a4d5-953802f0a1cf-kube-api-access-jp56k\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.552229 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5b450ecd-05e1-453a-a4d5-953802f0a1cf-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.552241 4791 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5b450ecd-05e1-453a-a4d5-953802f0a1cf-server-conf\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.552251 4791 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5b450ecd-05e1-453a-a4d5-953802f0a1cf-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.552281 4791 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-db75c799-c8b4-454a-9a7a-9308243c50db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-db75c799-c8b4-454a-9a7a-9308243c50db\") on node \"crc\" " Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.552292 4791 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5b450ecd-05e1-453a-a4d5-953802f0a1cf-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.552302 4791 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5b450ecd-05e1-453a-a4d5-953802f0a1cf-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.552310 4791 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: 
\"kubernetes.io/downward-api/5b450ecd-05e1-453a-a4d5-953802f0a1cf-pod-info\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.552318 4791 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5b450ecd-05e1-453a-a4d5-953802f0a1cf-plugins-conf\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.552326 4791 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5b450ecd-05e1-453a-a4d5-953802f0a1cf-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.586643 4791 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.586780 4791 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-db75c799-c8b4-454a-9a7a-9308243c50db" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-db75c799-c8b4-454a-9a7a-9308243c50db") on node "crc" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.593992 4791 generic.go:334] "Generic (PLEG): container finished" podID="5b450ecd-05e1-453a-a4d5-953802f0a1cf" containerID="5bd1c5f46865028441e3b970c5220006bbd97f4d84c9ca8ecd4b5cbfd164dee5" exitCode=0 Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.594072 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"5b450ecd-05e1-453a-a4d5-953802f0a1cf","Type":"ContainerDied","Data":"5bd1c5f46865028441e3b970c5220006bbd97f4d84c9ca8ecd4b5cbfd164dee5"} Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.594114 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"5b450ecd-05e1-453a-a4d5-953802f0a1cf","Type":"ContainerDied","Data":"ca6fba5979c814221553760960d5f3fcca7896906cfcac59791e4f82309fdf0c"} Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.594132 4791 scope.go:117] "RemoveContainer" containerID="5bd1c5f46865028441e3b970c5220006bbd97f4d84c9ca8ecd4b5cbfd164dee5" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.594674 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-2" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.597228 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29522941-j7h5m" event={"ID":"7335be95-38b6-4ee3-9e54-ec94854cda08","Type":"ContainerDied","Data":"b44f82151bf298ada135bcf4d600236859725f839b1425f8ca24c7f97a0ee85a"} Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.597315 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29522941-j7h5m" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.604270 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b44f82151bf298ada135bcf4d600236859725f839b1425f8ca24c7f97a0ee85a" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.619919 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b450ecd-05e1-453a-a4d5-953802f0a1cf-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "5b450ecd-05e1-453a-a4d5-953802f0a1cf" (UID: "5b450ecd-05e1-453a-a4d5-953802f0a1cf"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.644450 4791 scope.go:117] "RemoveContainer" containerID="c467e7539b13f4791359d094ec74565a21bb791891381b702594f37899c6d3d5" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.654309 4791 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5b450ecd-05e1-453a-a4d5-953802f0a1cf-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.654340 4791 reconciler_common.go:293] "Volume detached for volume \"pvc-db75c799-c8b4-454a-9a7a-9308243c50db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-db75c799-c8b4-454a-9a7a-9308243c50db\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.669935 4791 scope.go:117] "RemoveContainer" containerID="5bd1c5f46865028441e3b970c5220006bbd97f4d84c9ca8ecd4b5cbfd164dee5" Feb 18 01:01:06 crc kubenswrapper[4791]: E0218 01:01:06.670420 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5bd1c5f46865028441e3b970c5220006bbd97f4d84c9ca8ecd4b5cbfd164dee5\": container with ID starting with 5bd1c5f46865028441e3b970c5220006bbd97f4d84c9ca8ecd4b5cbfd164dee5 not found: ID does not exist" containerID="5bd1c5f46865028441e3b970c5220006bbd97f4d84c9ca8ecd4b5cbfd164dee5" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.670472 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5bd1c5f46865028441e3b970c5220006bbd97f4d84c9ca8ecd4b5cbfd164dee5"} err="failed to get container status \"5bd1c5f46865028441e3b970c5220006bbd97f4d84c9ca8ecd4b5cbfd164dee5\": rpc error: code = NotFound desc = could not find container \"5bd1c5f46865028441e3b970c5220006bbd97f4d84c9ca8ecd4b5cbfd164dee5\": container with ID starting with 5bd1c5f46865028441e3b970c5220006bbd97f4d84c9ca8ecd4b5cbfd164dee5 not found: ID does not exist" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.670497 4791 scope.go:117] "RemoveContainer" containerID="c467e7539b13f4791359d094ec74565a21bb791891381b702594f37899c6d3d5" Feb 18 01:01:06 crc kubenswrapper[4791]: E0218 01:01:06.670786 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c467e7539b13f4791359d094ec74565a21bb791891381b702594f37899c6d3d5\": container with ID starting with c467e7539b13f4791359d094ec74565a21bb791891381b702594f37899c6d3d5 not found: ID does not exist" containerID="c467e7539b13f4791359d094ec74565a21bb791891381b702594f37899c6d3d5" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.670857 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c467e7539b13f4791359d094ec74565a21bb791891381b702594f37899c6d3d5"} err="failed to get container status \"c467e7539b13f4791359d094ec74565a21bb791891381b702594f37899c6d3d5\": rpc error: code = NotFound desc = could not find container \"c467e7539b13f4791359d094ec74565a21bb791891381b702594f37899c6d3d5\": container with ID starting with c467e7539b13f4791359d094ec74565a21bb791891381b702594f37899c6d3d5 not found: ID does not exist" Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.951915 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-2"] Feb 18 01:01:06 crc kubenswrapper[4791]: I0218 01:01:06.980470 4791 kubelet.go:2431] "SyncLoop REMOVE" 
source="api" pods=["openstack/rabbitmq-server-2"] Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.004121 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-2"] Feb 18 01:01:07 crc kubenswrapper[4791]: E0218 01:01:07.004712 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b450ecd-05e1-453a-a4d5-953802f0a1cf" containerName="setup-container" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.004729 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b450ecd-05e1-453a-a4d5-953802f0a1cf" containerName="setup-container" Feb 18 01:01:07 crc kubenswrapper[4791]: E0218 01:01:07.004743 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7335be95-38b6-4ee3-9e54-ec94854cda08" containerName="keystone-cron" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.004750 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="7335be95-38b6-4ee3-9e54-ec94854cda08" containerName="keystone-cron" Feb 18 01:01:07 crc kubenswrapper[4791]: E0218 01:01:07.004780 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b450ecd-05e1-453a-a4d5-953802f0a1cf" containerName="rabbitmq" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.004786 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b450ecd-05e1-453a-a4d5-953802f0a1cf" containerName="rabbitmq" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.005009 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="7335be95-38b6-4ee3-9e54-ec94854cda08" containerName="keystone-cron" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.005044 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b450ecd-05e1-453a-a4d5-953802f0a1cf" containerName="rabbitmq" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.006378 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-2" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.035101 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-2"] Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.064059 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d-plugins-conf\") pod \"rabbitmq-server-2\" (UID: \"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d\") " pod="openstack/rabbitmq-server-2" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.064102 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-925fk\" (UniqueName: \"kubernetes.io/projected/37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d-kube-api-access-925fk\") pod \"rabbitmq-server-2\" (UID: \"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d\") " pod="openstack/rabbitmq-server-2" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.064128 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d-rabbitmq-plugins\") pod \"rabbitmq-server-2\" (UID: \"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d\") " pod="openstack/rabbitmq-server-2" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.064171 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d-erlang-cookie-secret\") pod \"rabbitmq-server-2\" (UID: \"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d\") " pod="openstack/rabbitmq-server-2" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.064194 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d-server-conf\") pod \"rabbitmq-server-2\" (UID: \"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d\") " pod="openstack/rabbitmq-server-2" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.064214 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d-pod-info\") pod \"rabbitmq-server-2\" (UID: \"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d\") " pod="openstack/rabbitmq-server-2" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.064246 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d-config-data\") pod \"rabbitmq-server-2\" (UID: \"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d\") " pod="openstack/rabbitmq-server-2" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.064276 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d-rabbitmq-tls\") pod \"rabbitmq-server-2\" (UID: \"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d\") " pod="openstack/rabbitmq-server-2" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.064305 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-2\" (UID: \"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d\") " pod="openstack/rabbitmq-server-2" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.064320 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d-rabbitmq-confd\") pod \"rabbitmq-server-2\" (UID: \"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d\") " pod="openstack/rabbitmq-server-2" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.064369 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-db75c799-c8b4-454a-9a7a-9308243c50db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-db75c799-c8b4-454a-9a7a-9308243c50db\") pod \"rabbitmq-server-2\" (UID: \"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d\") " pod="openstack/rabbitmq-server-2" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.076064 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b450ecd-05e1-453a-a4d5-953802f0a1cf" path="/var/lib/kubelet/pods/5b450ecd-05e1-453a-a4d5-953802f0a1cf/volumes" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.166934 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d-rabbitmq-plugins\") pod \"rabbitmq-server-2\" (UID: \"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d\") " pod="openstack/rabbitmq-server-2" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.167009 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d-erlang-cookie-secret\") pod \"rabbitmq-server-2\" (UID: \"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d\") " pod="openstack/rabbitmq-server-2" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.167052 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d-server-conf\") pod \"rabbitmq-server-2\" (UID: \"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d\") " pod="openstack/rabbitmq-server-2" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.167090 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d-pod-info\") pod \"rabbitmq-server-2\" (UID: \"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d\") " pod="openstack/rabbitmq-server-2" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.168378 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d-server-conf\") pod \"rabbitmq-server-2\" (UID: \"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d\") " pod="openstack/rabbitmq-server-2" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.168493 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d-rabbitmq-plugins\") pod \"rabbitmq-server-2\" (UID: \"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d\") " pod="openstack/rabbitmq-server-2" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.168616 4791 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d-config-data\") pod \"rabbitmq-server-2\" (UID: \"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d\") " pod="openstack/rabbitmq-server-2" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.168722 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d-config-data\") pod \"rabbitmq-server-2\" (UID: \"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d\") " pod="openstack/rabbitmq-server-2" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.168768 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d-rabbitmq-tls\") pod \"rabbitmq-server-2\" (UID: \"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d\") " pod="openstack/rabbitmq-server-2" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.168810 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d-rabbitmq-confd\") pod \"rabbitmq-server-2\" (UID: \"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d\") " pod="openstack/rabbitmq-server-2" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.168824 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-2\" (UID: \"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d\") " pod="openstack/rabbitmq-server-2" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.168910 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-db75c799-c8b4-454a-9a7a-9308243c50db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-db75c799-c8b4-454a-9a7a-9308243c50db\") pod \"rabbitmq-server-2\" (UID: \"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d\") " pod="openstack/rabbitmq-server-2" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.169017 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d-plugins-conf\") pod \"rabbitmq-server-2\" (UID: \"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d\") " pod="openstack/rabbitmq-server-2" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.169060 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-925fk\" (UniqueName: \"kubernetes.io/projected/37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d-kube-api-access-925fk\") pod \"rabbitmq-server-2\" (UID: \"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d\") " pod="openstack/rabbitmq-server-2" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.169634 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-2\" (UID: \"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d\") " pod="openstack/rabbitmq-server-2" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.170628 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d-plugins-conf\") pod \"rabbitmq-server-2\" (UID: 
\"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d\") " pod="openstack/rabbitmq-server-2" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.172436 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d-rabbitmq-confd\") pod \"rabbitmq-server-2\" (UID: \"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d\") " pod="openstack/rabbitmq-server-2" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.172386 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d-pod-info\") pod \"rabbitmq-server-2\" (UID: \"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d\") " pod="openstack/rabbitmq-server-2" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.172924 4791 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.173010 4791 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-db75c799-c8b4-454a-9a7a-9308243c50db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-db75c799-c8b4-454a-9a7a-9308243c50db\") pod \"rabbitmq-server-2\" (UID: \"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/d80776f0b0e6a29b061cf3e7bee3ac6a238caf9eab0bcc21880227f40c07f67b/globalmount\"" pod="openstack/rabbitmq-server-2" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.174711 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d-rabbitmq-tls\") pod \"rabbitmq-server-2\" (UID: \"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d\") " pod="openstack/rabbitmq-server-2" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.174777 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d-erlang-cookie-secret\") pod \"rabbitmq-server-2\" (UID: \"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d\") " pod="openstack/rabbitmq-server-2" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.188384 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-925fk\" (UniqueName: \"kubernetes.io/projected/37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d-kube-api-access-925fk\") pod \"rabbitmq-server-2\" (UID: \"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d\") " pod="openstack/rabbitmq-server-2" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.237228 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-db75c799-c8b4-454a-9a7a-9308243c50db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-db75c799-c8b4-454a-9a7a-9308243c50db\") pod \"rabbitmq-server-2\" (UID: \"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d\") " pod="openstack/rabbitmq-server-2" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.340920 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-2" Feb 18 01:01:07 crc kubenswrapper[4791]: I0218 01:01:07.874119 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-2"] Feb 18 01:01:07 crc kubenswrapper[4791]: W0218 01:01:07.909430 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37f2d008_97f2_4a1c_a9d9_8fa1df55cd2d.slice/crio-b131cb4734b6d1f34abfec253cbdc81c3e962aa3f5259e221bd7081dfcb1ba98 WatchSource:0}: Error finding container b131cb4734b6d1f34abfec253cbdc81c3e962aa3f5259e221bd7081dfcb1ba98: Status 404 returned error can't find the container with id b131cb4734b6d1f34abfec253cbdc81c3e962aa3f5259e221bd7081dfcb1ba98 Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.278772 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.412407 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4bb5fcf5-6cd5-4569-b788-5740edee3793-config-data\") pod \"4bb5fcf5-6cd5-4569-b788-5740edee3793\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.412469 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4bb5fcf5-6cd5-4569-b788-5740edee3793-rabbitmq-plugins\") pod \"4bb5fcf5-6cd5-4569-b788-5740edee3793\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.412530 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4bb5fcf5-6cd5-4569-b788-5740edee3793-plugins-conf\") pod \"4bb5fcf5-6cd5-4569-b788-5740edee3793\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.412554 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4bb5fcf5-6cd5-4569-b788-5740edee3793-rabbitmq-tls\") pod \"4bb5fcf5-6cd5-4569-b788-5740edee3793\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.412574 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4bb5fcf5-6cd5-4569-b788-5740edee3793-rabbitmq-erlang-cookie\") pod \"4bb5fcf5-6cd5-4569-b788-5740edee3793\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.412613 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5tzjn\" (UniqueName: \"kubernetes.io/projected/4bb5fcf5-6cd5-4569-b788-5740edee3793-kube-api-access-5tzjn\") pod \"4bb5fcf5-6cd5-4569-b788-5740edee3793\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.413787 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a1afe60e-af4f-4b7f-b712-986fa012f8ac\") pod \"4bb5fcf5-6cd5-4569-b788-5740edee3793\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.413825 4791 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4bb5fcf5-6cd5-4569-b788-5740edee3793-erlang-cookie-secret\") pod \"4bb5fcf5-6cd5-4569-b788-5740edee3793\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.413886 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4bb5fcf5-6cd5-4569-b788-5740edee3793-pod-info\") pod \"4bb5fcf5-6cd5-4569-b788-5740edee3793\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.413950 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4bb5fcf5-6cd5-4569-b788-5740edee3793-rabbitmq-confd\") pod \"4bb5fcf5-6cd5-4569-b788-5740edee3793\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.413972 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4bb5fcf5-6cd5-4569-b788-5740edee3793-server-conf\") pod \"4bb5fcf5-6cd5-4569-b788-5740edee3793\" (UID: \"4bb5fcf5-6cd5-4569-b788-5740edee3793\") " Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.415887 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb5fcf5-6cd5-4569-b788-5740edee3793-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "4bb5fcf5-6cd5-4569-b788-5740edee3793" (UID: "4bb5fcf5-6cd5-4569-b788-5740edee3793"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.417403 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4bb5fcf5-6cd5-4569-b788-5740edee3793-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "4bb5fcf5-6cd5-4569-b788-5740edee3793" (UID: "4bb5fcf5-6cd5-4569-b788-5740edee3793"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.418953 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4bb5fcf5-6cd5-4569-b788-5740edee3793-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "4bb5fcf5-6cd5-4569-b788-5740edee3793" (UID: "4bb5fcf5-6cd5-4569-b788-5740edee3793"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.428527 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/4bb5fcf5-6cd5-4569-b788-5740edee3793-pod-info" (OuterVolumeSpecName: "pod-info") pod "4bb5fcf5-6cd5-4569-b788-5740edee3793" (UID: "4bb5fcf5-6cd5-4569-b788-5740edee3793"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.431860 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb5fcf5-6cd5-4569-b788-5740edee3793-kube-api-access-5tzjn" (OuterVolumeSpecName: "kube-api-access-5tzjn") pod "4bb5fcf5-6cd5-4569-b788-5740edee3793" (UID: "4bb5fcf5-6cd5-4569-b788-5740edee3793"). InnerVolumeSpecName "kube-api-access-5tzjn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.433299 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4bb5fcf5-6cd5-4569-b788-5740edee3793-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "4bb5fcf5-6cd5-4569-b788-5740edee3793" (UID: "4bb5fcf5-6cd5-4569-b788-5740edee3793"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.436320 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb5fcf5-6cd5-4569-b788-5740edee3793-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "4bb5fcf5-6cd5-4569-b788-5740edee3793" (UID: "4bb5fcf5-6cd5-4569-b788-5740edee3793"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.441594 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a1afe60e-af4f-4b7f-b712-986fa012f8ac" (OuterVolumeSpecName: "persistence") pod "4bb5fcf5-6cd5-4569-b788-5740edee3793" (UID: "4bb5fcf5-6cd5-4569-b788-5740edee3793"). InnerVolumeSpecName "pvc-a1afe60e-af4f-4b7f-b712-986fa012f8ac". PluginName "kubernetes.io/csi", VolumeGidValue "" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.459232 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb5fcf5-6cd5-4569-b788-5740edee3793-config-data" (OuterVolumeSpecName: "config-data") pod "4bb5fcf5-6cd5-4569-b788-5740edee3793" (UID: "4bb5fcf5-6cd5-4569-b788-5740edee3793"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.504410 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb5fcf5-6cd5-4569-b788-5740edee3793-server-conf" (OuterVolumeSpecName: "server-conf") pod "4bb5fcf5-6cd5-4569-b788-5740edee3793" (UID: "4bb5fcf5-6cd5-4569-b788-5740edee3793"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.516151 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4bb5fcf5-6cd5-4569-b788-5740edee3793-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.516240 4791 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4bb5fcf5-6cd5-4569-b788-5740edee3793-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.516250 4791 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4bb5fcf5-6cd5-4569-b788-5740edee3793-plugins-conf\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.516258 4791 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4bb5fcf5-6cd5-4569-b788-5740edee3793-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.516268 4791 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4bb5fcf5-6cd5-4569-b788-5740edee3793-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.516279 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5tzjn\" (UniqueName: \"kubernetes.io/projected/4bb5fcf5-6cd5-4569-b788-5740edee3793-kube-api-access-5tzjn\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.516304 4791 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-a1afe60e-af4f-4b7f-b712-986fa012f8ac\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a1afe60e-af4f-4b7f-b712-986fa012f8ac\") on node \"crc\" " Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.516314 4791 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4bb5fcf5-6cd5-4569-b788-5740edee3793-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.516323 4791 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4bb5fcf5-6cd5-4569-b788-5740edee3793-pod-info\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.516331 4791 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4bb5fcf5-6cd5-4569-b788-5740edee3793-server-conf\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.557104 4791 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.557385 4791 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-a1afe60e-af4f-4b7f-b712-986fa012f8ac" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a1afe60e-af4f-4b7f-b712-986fa012f8ac") on node "crc" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.576998 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb5fcf5-6cd5-4569-b788-5740edee3793-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "4bb5fcf5-6cd5-4569-b788-5740edee3793" (UID: "4bb5fcf5-6cd5-4569-b788-5740edee3793"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.618184 4791 reconciler_common.go:293] "Volume detached for volume \"pvc-a1afe60e-af4f-4b7f-b712-986fa012f8ac\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a1afe60e-af4f-4b7f-b712-986fa012f8ac\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.618249 4791 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4bb5fcf5-6cd5-4569-b788-5740edee3793-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.626773 4791 generic.go:334] "Generic (PLEG): container finished" podID="4bb5fcf5-6cd5-4569-b788-5740edee3793" containerID="a3a58d7367bdc2390499ae1197865472c365818400bec1809ff6593cd43c9604" exitCode=0 Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.626845 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"4bb5fcf5-6cd5-4569-b788-5740edee3793","Type":"ContainerDied","Data":"a3a58d7367bdc2390499ae1197865472c365818400bec1809ff6593cd43c9604"} Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.626855 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.626876 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"4bb5fcf5-6cd5-4569-b788-5740edee3793","Type":"ContainerDied","Data":"e6b0d7e7a41b55e4e7d45a2cbb67fda4251545703d01d1523c8bfcf2efa50be6"} Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.626897 4791 scope.go:117] "RemoveContainer" containerID="a3a58d7367bdc2390499ae1197865472c365818400bec1809ff6593cd43c9604" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.629315 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d","Type":"ContainerStarted","Data":"b131cb4734b6d1f34abfec253cbdc81c3e962aa3f5259e221bd7081dfcb1ba98"} Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.651570 4791 scope.go:117] "RemoveContainer" containerID="8fc686486368da1e0d773d98fc965789987967d981714709a5bcb76c4a7714ad" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.664314 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.686712 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.700782 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Feb 18 01:01:08 crc kubenswrapper[4791]: E0218 01:01:08.701520 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4bb5fcf5-6cd5-4569-b788-5740edee3793" containerName="setup-container" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.701544 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="4bb5fcf5-6cd5-4569-b788-5740edee3793" containerName="setup-container" Feb 18 01:01:08 crc kubenswrapper[4791]: E0218 01:01:08.701588 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4bb5fcf5-6cd5-4569-b788-5740edee3793" containerName="rabbitmq" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.701597 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="4bb5fcf5-6cd5-4569-b788-5740edee3793" containerName="rabbitmq" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.701912 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="4bb5fcf5-6cd5-4569-b788-5740edee3793" containerName="rabbitmq" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.703958 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.710553 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.710855 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.719118 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.722045 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.722095 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.722190 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.722045 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-w7wn5" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.722391 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.725663 4791 scope.go:117] "RemoveContainer" containerID="a3a58d7367bdc2390499ae1197865472c365818400bec1809ff6593cd43c9604" Feb 18 01:01:08 crc kubenswrapper[4791]: E0218 01:01:08.726190 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a3a58d7367bdc2390499ae1197865472c365818400bec1809ff6593cd43c9604\": container with ID starting with a3a58d7367bdc2390499ae1197865472c365818400bec1809ff6593cd43c9604 not found: ID does not exist" containerID="a3a58d7367bdc2390499ae1197865472c365818400bec1809ff6593cd43c9604" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.726282 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a3a58d7367bdc2390499ae1197865472c365818400bec1809ff6593cd43c9604"} err="failed to get container status \"a3a58d7367bdc2390499ae1197865472c365818400bec1809ff6593cd43c9604\": rpc error: code = NotFound desc = could not find container \"a3a58d7367bdc2390499ae1197865472c365818400bec1809ff6593cd43c9604\": container with ID starting with a3a58d7367bdc2390499ae1197865472c365818400bec1809ff6593cd43c9604 not found: ID does not exist" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.726361 4791 scope.go:117] "RemoveContainer" containerID="8fc686486368da1e0d773d98fc965789987967d981714709a5bcb76c4a7714ad" Feb 18 01:01:08 crc kubenswrapper[4791]: E0218 01:01:08.728063 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8fc686486368da1e0d773d98fc965789987967d981714709a5bcb76c4a7714ad\": container with ID starting with 8fc686486368da1e0d773d98fc965789987967d981714709a5bcb76c4a7714ad not found: ID does not exist" containerID="8fc686486368da1e0d773d98fc965789987967d981714709a5bcb76c4a7714ad" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.728093 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8fc686486368da1e0d773d98fc965789987967d981714709a5bcb76c4a7714ad"} 
err="failed to get container status \"8fc686486368da1e0d773d98fc965789987967d981714709a5bcb76c4a7714ad\": rpc error: code = NotFound desc = could not find container \"8fc686486368da1e0d773d98fc965789987967d981714709a5bcb76c4a7714ad\": container with ID starting with 8fc686486368da1e0d773d98fc965789987967d981714709a5bcb76c4a7714ad not found: ID does not exist" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.822785 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/96310c28-c67e-463a-ab1e-beb273a7434e-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"96310c28-c67e-463a-ab1e-beb273a7434e\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.822921 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/96310c28-c67e-463a-ab1e-beb273a7434e-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"96310c28-c67e-463a-ab1e-beb273a7434e\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.823039 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/96310c28-c67e-463a-ab1e-beb273a7434e-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"96310c28-c67e-463a-ab1e-beb273a7434e\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.823462 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/96310c28-c67e-463a-ab1e-beb273a7434e-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"96310c28-c67e-463a-ab1e-beb273a7434e\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.823557 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/96310c28-c67e-463a-ab1e-beb273a7434e-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"96310c28-c67e-463a-ab1e-beb273a7434e\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.823609 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kfrfs\" (UniqueName: \"kubernetes.io/projected/96310c28-c67e-463a-ab1e-beb273a7434e-kube-api-access-kfrfs\") pod \"rabbitmq-cell1-server-0\" (UID: \"96310c28-c67e-463a-ab1e-beb273a7434e\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.823744 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/96310c28-c67e-463a-ab1e-beb273a7434e-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"96310c28-c67e-463a-ab1e-beb273a7434e\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.823911 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-a1afe60e-af4f-4b7f-b712-986fa012f8ac\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a1afe60e-af4f-4b7f-b712-986fa012f8ac\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"96310c28-c67e-463a-ab1e-beb273a7434e\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.824039 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/96310c28-c67e-463a-ab1e-beb273a7434e-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"96310c28-c67e-463a-ab1e-beb273a7434e\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.824233 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/96310c28-c67e-463a-ab1e-beb273a7434e-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"96310c28-c67e-463a-ab1e-beb273a7434e\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.824274 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/96310c28-c67e-463a-ab1e-beb273a7434e-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"96310c28-c67e-463a-ab1e-beb273a7434e\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.926686 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/96310c28-c67e-463a-ab1e-beb273a7434e-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"96310c28-c67e-463a-ab1e-beb273a7434e\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.926735 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/96310c28-c67e-463a-ab1e-beb273a7434e-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"96310c28-c67e-463a-ab1e-beb273a7434e\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.926776 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/96310c28-c67e-463a-ab1e-beb273a7434e-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"96310c28-c67e-463a-ab1e-beb273a7434e\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.926801 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/96310c28-c67e-463a-ab1e-beb273a7434e-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"96310c28-c67e-463a-ab1e-beb273a7434e\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.926836 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/96310c28-c67e-463a-ab1e-beb273a7434e-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"96310c28-c67e-463a-ab1e-beb273a7434e\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.926866 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/96310c28-c67e-463a-ab1e-beb273a7434e-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"96310c28-c67e-463a-ab1e-beb273a7434e\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 
01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.926895 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/96310c28-c67e-463a-ab1e-beb273a7434e-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"96310c28-c67e-463a-ab1e-beb273a7434e\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.926918 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kfrfs\" (UniqueName: \"kubernetes.io/projected/96310c28-c67e-463a-ab1e-beb273a7434e-kube-api-access-kfrfs\") pod \"rabbitmq-cell1-server-0\" (UID: \"96310c28-c67e-463a-ab1e-beb273a7434e\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.926963 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/96310c28-c67e-463a-ab1e-beb273a7434e-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"96310c28-c67e-463a-ab1e-beb273a7434e\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.927010 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-a1afe60e-af4f-4b7f-b712-986fa012f8ac\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a1afe60e-af4f-4b7f-b712-986fa012f8ac\") pod \"rabbitmq-cell1-server-0\" (UID: \"96310c28-c67e-463a-ab1e-beb273a7434e\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.927053 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/96310c28-c67e-463a-ab1e-beb273a7434e-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"96310c28-c67e-463a-ab1e-beb273a7434e\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.927975 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/96310c28-c67e-463a-ab1e-beb273a7434e-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"96310c28-c67e-463a-ab1e-beb273a7434e\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.928373 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/96310c28-c67e-463a-ab1e-beb273a7434e-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"96310c28-c67e-463a-ab1e-beb273a7434e\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.928583 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/96310c28-c67e-463a-ab1e-beb273a7434e-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"96310c28-c67e-463a-ab1e-beb273a7434e\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.928881 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/96310c28-c67e-463a-ab1e-beb273a7434e-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"96310c28-c67e-463a-ab1e-beb273a7434e\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.929242 4791 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/96310c28-c67e-463a-ab1e-beb273a7434e-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"96310c28-c67e-463a-ab1e-beb273a7434e\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.933770 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/96310c28-c67e-463a-ab1e-beb273a7434e-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"96310c28-c67e-463a-ab1e-beb273a7434e\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.934233 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/96310c28-c67e-463a-ab1e-beb273a7434e-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"96310c28-c67e-463a-ab1e-beb273a7434e\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.934487 4791 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.934528 4791 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-a1afe60e-af4f-4b7f-b712-986fa012f8ac\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a1afe60e-af4f-4b7f-b712-986fa012f8ac\") pod \"rabbitmq-cell1-server-0\" (UID: \"96310c28-c67e-463a-ab1e-beb273a7434e\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/bda4c13c6af3a740a356a538e78a89662164c12658c511cf721ff5cb1d0d32f4/globalmount\"" pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.935911 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/96310c28-c67e-463a-ab1e-beb273a7434e-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"96310c28-c67e-463a-ab1e-beb273a7434e\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.937797 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/96310c28-c67e-463a-ab1e-beb273a7434e-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"96310c28-c67e-463a-ab1e-beb273a7434e\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:08 crc kubenswrapper[4791]: I0218 01:01:08.948051 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kfrfs\" (UniqueName: \"kubernetes.io/projected/96310c28-c67e-463a-ab1e-beb273a7434e-kube-api-access-kfrfs\") pod \"rabbitmq-cell1-server-0\" (UID: \"96310c28-c67e-463a-ab1e-beb273a7434e\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:09 crc kubenswrapper[4791]: I0218 01:01:09.004067 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-a1afe60e-af4f-4b7f-b712-986fa012f8ac\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a1afe60e-af4f-4b7f-b712-986fa012f8ac\") pod \"rabbitmq-cell1-server-0\" (UID: \"96310c28-c67e-463a-ab1e-beb273a7434e\") " pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:09 crc kubenswrapper[4791]: I0218 01:01:09.027715 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-68df85789f-jsklv"] Feb 18 01:01:09 crc kubenswrapper[4791]: I0218 
01:01:09.029535 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:09 crc kubenswrapper[4791]: I0218 01:01:09.030887 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68df85789f-jsklv" Feb 18 01:01:09 crc kubenswrapper[4791]: I0218 01:01:09.034194 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Feb 18 01:01:09 crc kubenswrapper[4791]: I0218 01:01:09.040536 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-jsklv"] Feb 18 01:01:09 crc kubenswrapper[4791]: I0218 01:01:09.079221 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb5fcf5-6cd5-4569-b788-5740edee3793" path="/var/lib/kubelet/pods/4bb5fcf5-6cd5-4569-b788-5740edee3793/volumes" Feb 18 01:01:09 crc kubenswrapper[4791]: I0218 01:01:09.132612 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/151ecafd-43d4-496a-9262-c68d93bed5de-config\") pod \"dnsmasq-dns-68df85789f-jsklv\" (UID: \"151ecafd-43d4-496a-9262-c68d93bed5de\") " pod="openstack/dnsmasq-dns-68df85789f-jsklv" Feb 18 01:01:09 crc kubenswrapper[4791]: I0218 01:01:09.132682 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k8zqb\" (UniqueName: \"kubernetes.io/projected/151ecafd-43d4-496a-9262-c68d93bed5de-kube-api-access-k8zqb\") pod \"dnsmasq-dns-68df85789f-jsklv\" (UID: \"151ecafd-43d4-496a-9262-c68d93bed5de\") " pod="openstack/dnsmasq-dns-68df85789f-jsklv" Feb 18 01:01:09 crc kubenswrapper[4791]: I0218 01:01:09.132718 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/151ecafd-43d4-496a-9262-c68d93bed5de-dns-svc\") pod \"dnsmasq-dns-68df85789f-jsklv\" (UID: \"151ecafd-43d4-496a-9262-c68d93bed5de\") " pod="openstack/dnsmasq-dns-68df85789f-jsklv" Feb 18 01:01:09 crc kubenswrapper[4791]: I0218 01:01:09.132760 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/151ecafd-43d4-496a-9262-c68d93bed5de-ovsdbserver-sb\") pod \"dnsmasq-dns-68df85789f-jsklv\" (UID: \"151ecafd-43d4-496a-9262-c68d93bed5de\") " pod="openstack/dnsmasq-dns-68df85789f-jsklv" Feb 18 01:01:09 crc kubenswrapper[4791]: I0218 01:01:09.132821 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/151ecafd-43d4-496a-9262-c68d93bed5de-openstack-edpm-ipam\") pod \"dnsmasq-dns-68df85789f-jsklv\" (UID: \"151ecafd-43d4-496a-9262-c68d93bed5de\") " pod="openstack/dnsmasq-dns-68df85789f-jsklv" Feb 18 01:01:09 crc kubenswrapper[4791]: I0218 01:01:09.132951 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/151ecafd-43d4-496a-9262-c68d93bed5de-dns-swift-storage-0\") pod \"dnsmasq-dns-68df85789f-jsklv\" (UID: \"151ecafd-43d4-496a-9262-c68d93bed5de\") " pod="openstack/dnsmasq-dns-68df85789f-jsklv" Feb 18 01:01:09 crc kubenswrapper[4791]: I0218 01:01:09.133088 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/151ecafd-43d4-496a-9262-c68d93bed5de-ovsdbserver-nb\") pod \"dnsmasq-dns-68df85789f-jsklv\" (UID: \"151ecafd-43d4-496a-9262-c68d93bed5de\") " pod="openstack/dnsmasq-dns-68df85789f-jsklv" Feb 18 01:01:09 crc kubenswrapper[4791]: I0218 01:01:09.236075 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/151ecafd-43d4-496a-9262-c68d93bed5de-config\") pod \"dnsmasq-dns-68df85789f-jsklv\" (UID: \"151ecafd-43d4-496a-9262-c68d93bed5de\") " pod="openstack/dnsmasq-dns-68df85789f-jsklv" Feb 18 01:01:09 crc kubenswrapper[4791]: I0218 01:01:09.236374 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k8zqb\" (UniqueName: \"kubernetes.io/projected/151ecafd-43d4-496a-9262-c68d93bed5de-kube-api-access-k8zqb\") pod \"dnsmasq-dns-68df85789f-jsklv\" (UID: \"151ecafd-43d4-496a-9262-c68d93bed5de\") " pod="openstack/dnsmasq-dns-68df85789f-jsklv" Feb 18 01:01:09 crc kubenswrapper[4791]: I0218 01:01:09.236398 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/151ecafd-43d4-496a-9262-c68d93bed5de-dns-svc\") pod \"dnsmasq-dns-68df85789f-jsklv\" (UID: \"151ecafd-43d4-496a-9262-c68d93bed5de\") " pod="openstack/dnsmasq-dns-68df85789f-jsklv" Feb 18 01:01:09 crc kubenswrapper[4791]: I0218 01:01:09.236441 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/151ecafd-43d4-496a-9262-c68d93bed5de-ovsdbserver-sb\") pod \"dnsmasq-dns-68df85789f-jsklv\" (UID: \"151ecafd-43d4-496a-9262-c68d93bed5de\") " pod="openstack/dnsmasq-dns-68df85789f-jsklv" Feb 18 01:01:09 crc kubenswrapper[4791]: I0218 01:01:09.236494 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/151ecafd-43d4-496a-9262-c68d93bed5de-openstack-edpm-ipam\") pod \"dnsmasq-dns-68df85789f-jsklv\" (UID: \"151ecafd-43d4-496a-9262-c68d93bed5de\") " pod="openstack/dnsmasq-dns-68df85789f-jsklv" Feb 18 01:01:09 crc kubenswrapper[4791]: I0218 01:01:09.236605 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/151ecafd-43d4-496a-9262-c68d93bed5de-dns-swift-storage-0\") pod \"dnsmasq-dns-68df85789f-jsklv\" (UID: \"151ecafd-43d4-496a-9262-c68d93bed5de\") " pod="openstack/dnsmasq-dns-68df85789f-jsklv" Feb 18 01:01:09 crc kubenswrapper[4791]: I0218 01:01:09.237441 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/151ecafd-43d4-496a-9262-c68d93bed5de-ovsdbserver-nb\") pod \"dnsmasq-dns-68df85789f-jsklv\" (UID: \"151ecafd-43d4-496a-9262-c68d93bed5de\") " pod="openstack/dnsmasq-dns-68df85789f-jsklv" Feb 18 01:01:09 crc kubenswrapper[4791]: I0218 01:01:09.237480 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/151ecafd-43d4-496a-9262-c68d93bed5de-dns-swift-storage-0\") pod \"dnsmasq-dns-68df85789f-jsklv\" (UID: \"151ecafd-43d4-496a-9262-c68d93bed5de\") " pod="openstack/dnsmasq-dns-68df85789f-jsklv" Feb 18 01:01:09 crc kubenswrapper[4791]: I0218 01:01:09.237498 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: 
\"kubernetes.io/configmap/151ecafd-43d4-496a-9262-c68d93bed5de-openstack-edpm-ipam\") pod \"dnsmasq-dns-68df85789f-jsklv\" (UID: \"151ecafd-43d4-496a-9262-c68d93bed5de\") " pod="openstack/dnsmasq-dns-68df85789f-jsklv" Feb 18 01:01:09 crc kubenswrapper[4791]: I0218 01:01:09.238202 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/151ecafd-43d4-496a-9262-c68d93bed5de-ovsdbserver-nb\") pod \"dnsmasq-dns-68df85789f-jsklv\" (UID: \"151ecafd-43d4-496a-9262-c68d93bed5de\") " pod="openstack/dnsmasq-dns-68df85789f-jsklv" Feb 18 01:01:09 crc kubenswrapper[4791]: I0218 01:01:09.239603 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/151ecafd-43d4-496a-9262-c68d93bed5de-ovsdbserver-sb\") pod \"dnsmasq-dns-68df85789f-jsklv\" (UID: \"151ecafd-43d4-496a-9262-c68d93bed5de\") " pod="openstack/dnsmasq-dns-68df85789f-jsklv" Feb 18 01:01:09 crc kubenswrapper[4791]: I0218 01:01:09.241725 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/151ecafd-43d4-496a-9262-c68d93bed5de-config\") pod \"dnsmasq-dns-68df85789f-jsklv\" (UID: \"151ecafd-43d4-496a-9262-c68d93bed5de\") " pod="openstack/dnsmasq-dns-68df85789f-jsklv" Feb 18 01:01:09 crc kubenswrapper[4791]: I0218 01:01:09.241842 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/151ecafd-43d4-496a-9262-c68d93bed5de-dns-svc\") pod \"dnsmasq-dns-68df85789f-jsklv\" (UID: \"151ecafd-43d4-496a-9262-c68d93bed5de\") " pod="openstack/dnsmasq-dns-68df85789f-jsklv" Feb 18 01:01:09 crc kubenswrapper[4791]: I0218 01:01:09.263847 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k8zqb\" (UniqueName: \"kubernetes.io/projected/151ecafd-43d4-496a-9262-c68d93bed5de-kube-api-access-k8zqb\") pod \"dnsmasq-dns-68df85789f-jsklv\" (UID: \"151ecafd-43d4-496a-9262-c68d93bed5de\") " pod="openstack/dnsmasq-dns-68df85789f-jsklv" Feb 18 01:01:09 crc kubenswrapper[4791]: I0218 01:01:09.557636 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Feb 18 01:01:09 crc kubenswrapper[4791]: I0218 01:01:09.647175 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"96310c28-c67e-463a-ab1e-beb273a7434e","Type":"ContainerStarted","Data":"860804127edf24cbd1bec36594bb97dfe969a75fd26bef2fbdb04be646f112d6"} Feb 18 01:01:09 crc kubenswrapper[4791]: I0218 01:01:09.896863 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68df85789f-jsklv" Feb 18 01:01:10 crc kubenswrapper[4791]: E0218 01:01:10.198845 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 01:01:10 crc kubenswrapper[4791]: E0218 01:01:10.199178 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 01:01:10 crc kubenswrapper[4791]: E0218 01:01:10.199307 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lgn7d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-scphk_openstack(68e5e8d6-5771-4045-858a-4a39b2db99f9): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Feb 18 01:01:10 crc kubenswrapper[4791]: E0218 01:01:10.200711 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:01:10 crc kubenswrapper[4791]: I0218 01:01:10.459656 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-jsklv"] Feb 18 01:01:10 crc kubenswrapper[4791]: I0218 01:01:10.659580 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68df85789f-jsklv" event={"ID":"151ecafd-43d4-496a-9262-c68d93bed5de","Type":"ContainerStarted","Data":"eb653deb940c56484d1ebaa04b3c0ed84c0f775f9aeacca1bce9272a2ee20b76"} Feb 18 01:01:10 crc kubenswrapper[4791]: I0218 01:01:10.665857 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d","Type":"ContainerStarted","Data":"6ff7541f29295439ea21aa0c4cd09b78358786b4fb1890cbb34d459fb5cb8bab"} Feb 18 01:01:11 crc kubenswrapper[4791]: I0218 01:01:11.682355 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"96310c28-c67e-463a-ab1e-beb273a7434e","Type":"ContainerStarted","Data":"f273a5cb3268e37795808a7e82988ecd620898d98aac288b85382da59c8e828d"} Feb 18 01:01:11 crc kubenswrapper[4791]: I0218 01:01:11.686709 4791 generic.go:334] "Generic (PLEG): container finished" podID="151ecafd-43d4-496a-9262-c68d93bed5de" containerID="131777a9578c4fab6926b89e0fe2c148adde9307b5d0419e64798a86ca8c6f81" exitCode=0 Feb 18 01:01:11 crc kubenswrapper[4791]: I0218 01:01:11.686769 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68df85789f-jsklv" event={"ID":"151ecafd-43d4-496a-9262-c68d93bed5de","Type":"ContainerDied","Data":"131777a9578c4fab6926b89e0fe2c148adde9307b5d0419e64798a86ca8c6f81"} Feb 18 01:01:12 crc kubenswrapper[4791]: I0218 01:01:12.704985 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68df85789f-jsklv" event={"ID":"151ecafd-43d4-496a-9262-c68d93bed5de","Type":"ContainerStarted","Data":"f929872188aa00e818eba252ba21aa45859f51c5b860d778f587176d63ed6434"} Feb 18 01:01:12 crc kubenswrapper[4791]: I0218 01:01:12.731809 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-68df85789f-jsklv" podStartSLOduration=3.731783755 podStartE2EDuration="3.731783755s" podCreationTimestamp="2026-02-18 01:01:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 01:01:12.726995467 +0000 UTC m=+1614.295008677" watchObservedRunningTime="2026-02-18 01:01:12.731783755 +0000 UTC m=+1614.299796935" Feb 18 01:01:13 crc kubenswrapper[4791]: I0218 01:01:13.715047 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-68df85789f-jsklv" Feb 18 01:01:18 crc kubenswrapper[4791]: I0218 01:01:18.781852 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"82b5d66f-f75d-41d1-be23-9e6b60446ad5","Type":"ContainerStarted","Data":"c335bbce2638ec0ff3015dff9faf44c5f1d4d1362de8a1ead007ead2a8c2cafe"} Feb 18 01:01:18 crc kubenswrapper[4791]: I0218 01:01:18.782038 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="82b5d66f-f75d-41d1-be23-9e6b60446ad5" containerName="ceilometer-central-agent" containerID="cri-o://645d82e19cf67227a6cc3840d9814a8717abac72619fae8e06c113395b996af2" gracePeriod=30 Feb 18 01:01:18 crc kubenswrapper[4791]: I0218 01:01:18.782121 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="82b5d66f-f75d-41d1-be23-9e6b60446ad5" containerName="proxy-httpd" containerID="cri-o://c335bbce2638ec0ff3015dff9faf44c5f1d4d1362de8a1ead007ead2a8c2cafe" gracePeriod=30 Feb 18 01:01:18 crc kubenswrapper[4791]: I0218 01:01:18.782553 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Feb 18 01:01:18 crc kubenswrapper[4791]: I0218 01:01:18.782297 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="82b5d66f-f75d-41d1-be23-9e6b60446ad5" containerName="sg-core" containerID="cri-o://99f5b0bcd78611195226e82a4917bf1ec49ac3467917762b14aeb02397478504" gracePeriod=30 Feb 18 01:01:18 crc kubenswrapper[4791]: I0218 01:01:18.784124 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="82b5d66f-f75d-41d1-be23-9e6b60446ad5" containerName="ceilometer-notification-agent" containerID="cri-o://dd8f0cb3bd8ebeac41acf4143937ae5931d38504296c21e1781a99e3a7c10d04" gracePeriod=30 Feb 18 01:01:18 crc kubenswrapper[4791]: I0218 01:01:18.812638 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.521877909 podStartE2EDuration="28.81261922s" podCreationTimestamp="2026-02-18 01:00:50 +0000 UTC" firstStartedPulling="2026-02-18 01:00:51.511487788 +0000 UTC m=+1593.079500958" lastFinishedPulling="2026-02-18 01:01:17.802229089 +0000 UTC m=+1619.370242269" observedRunningTime="2026-02-18 01:01:18.809971348 +0000 UTC m=+1620.377984518" watchObservedRunningTime="2026-02-18 01:01:18.81261922 +0000 UTC m=+1620.380632410" Feb 18 01:01:19 crc kubenswrapper[4791]: I0218 01:01:19.799126 4791 generic.go:334] "Generic (PLEG): container finished" podID="82b5d66f-f75d-41d1-be23-9e6b60446ad5" containerID="c335bbce2638ec0ff3015dff9faf44c5f1d4d1362de8a1ead007ead2a8c2cafe" exitCode=0 Feb 18 01:01:19 crc kubenswrapper[4791]: I0218 01:01:19.799461 4791 generic.go:334] "Generic (PLEG): container finished" podID="82b5d66f-f75d-41d1-be23-9e6b60446ad5" containerID="99f5b0bcd78611195226e82a4917bf1ec49ac3467917762b14aeb02397478504" exitCode=2 Feb 18 01:01:19 crc kubenswrapper[4791]: I0218 01:01:19.799470 4791 generic.go:334] "Generic (PLEG): container finished" podID="82b5d66f-f75d-41d1-be23-9e6b60446ad5" containerID="645d82e19cf67227a6cc3840d9814a8717abac72619fae8e06c113395b996af2" exitCode=0 Feb 18 01:01:19 crc kubenswrapper[4791]: I0218 01:01:19.799190 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"82b5d66f-f75d-41d1-be23-9e6b60446ad5","Type":"ContainerDied","Data":"c335bbce2638ec0ff3015dff9faf44c5f1d4d1362de8a1ead007ead2a8c2cafe"} Feb 18 01:01:19 crc kubenswrapper[4791]: I0218 01:01:19.799507 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"82b5d66f-f75d-41d1-be23-9e6b60446ad5","Type":"ContainerDied","Data":"99f5b0bcd78611195226e82a4917bf1ec49ac3467917762b14aeb02397478504"} Feb 18 01:01:19 crc kubenswrapper[4791]: I0218 01:01:19.799521 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"82b5d66f-f75d-41d1-be23-9e6b60446ad5","Type":"ContainerDied","Data":"645d82e19cf67227a6cc3840d9814a8717abac72619fae8e06c113395b996af2"} Feb 18 01:01:19 crc kubenswrapper[4791]: I0218 01:01:19.898947 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-68df85789f-jsklv" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.002247 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-b7s2x"] Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.002442 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-79b5d74c8c-b7s2x" podUID="3d564d20-a048-4a0b-93c5-6b2b1dd278f8" containerName="dnsmasq-dns" containerID="cri-o://ebf925a19e35e92570426048f941cfb36885dd9f889bcc8f6b01dbb33b9e9ff7" gracePeriod=10 Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.165183 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-bb85b8995-fgcpw"] Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.167011 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bb85b8995-fgcpw" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.181983 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bb85b8995-fgcpw"] Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.318996 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pwfq2\" (UniqueName: \"kubernetes.io/projected/9d4d756f-d3c4-4fd5-a75e-0df5c33004fb-kube-api-access-pwfq2\") pod \"dnsmasq-dns-bb85b8995-fgcpw\" (UID: \"9d4d756f-d3c4-4fd5-a75e-0df5c33004fb\") " pod="openstack/dnsmasq-dns-bb85b8995-fgcpw" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.319416 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9d4d756f-d3c4-4fd5-a75e-0df5c33004fb-dns-svc\") pod \"dnsmasq-dns-bb85b8995-fgcpw\" (UID: \"9d4d756f-d3c4-4fd5-a75e-0df5c33004fb\") " pod="openstack/dnsmasq-dns-bb85b8995-fgcpw" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.319440 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9d4d756f-d3c4-4fd5-a75e-0df5c33004fb-dns-swift-storage-0\") pod \"dnsmasq-dns-bb85b8995-fgcpw\" (UID: \"9d4d756f-d3c4-4fd5-a75e-0df5c33004fb\") " pod="openstack/dnsmasq-dns-bb85b8995-fgcpw" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.319505 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4d756f-d3c4-4fd5-a75e-0df5c33004fb-config\") pod \"dnsmasq-dns-bb85b8995-fgcpw\" (UID: \"9d4d756f-d3c4-4fd5-a75e-0df5c33004fb\") " pod="openstack/dnsmasq-dns-bb85b8995-fgcpw" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.319556 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9d4d756f-d3c4-4fd5-a75e-0df5c33004fb-ovsdbserver-nb\") pod 
\"dnsmasq-dns-bb85b8995-fgcpw\" (UID: \"9d4d756f-d3c4-4fd5-a75e-0df5c33004fb\") " pod="openstack/dnsmasq-dns-bb85b8995-fgcpw" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.319599 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9d4d756f-d3c4-4fd5-a75e-0df5c33004fb-ovsdbserver-sb\") pod \"dnsmasq-dns-bb85b8995-fgcpw\" (UID: \"9d4d756f-d3c4-4fd5-a75e-0df5c33004fb\") " pod="openstack/dnsmasq-dns-bb85b8995-fgcpw" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.319631 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/9d4d756f-d3c4-4fd5-a75e-0df5c33004fb-openstack-edpm-ipam\") pod \"dnsmasq-dns-bb85b8995-fgcpw\" (UID: \"9d4d756f-d3c4-4fd5-a75e-0df5c33004fb\") " pod="openstack/dnsmasq-dns-bb85b8995-fgcpw" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.422424 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4d756f-d3c4-4fd5-a75e-0df5c33004fb-config\") pod \"dnsmasq-dns-bb85b8995-fgcpw\" (UID: \"9d4d756f-d3c4-4fd5-a75e-0df5c33004fb\") " pod="openstack/dnsmasq-dns-bb85b8995-fgcpw" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.422530 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9d4d756f-d3c4-4fd5-a75e-0df5c33004fb-ovsdbserver-nb\") pod \"dnsmasq-dns-bb85b8995-fgcpw\" (UID: \"9d4d756f-d3c4-4fd5-a75e-0df5c33004fb\") " pod="openstack/dnsmasq-dns-bb85b8995-fgcpw" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.422619 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9d4d756f-d3c4-4fd5-a75e-0df5c33004fb-ovsdbserver-sb\") pod \"dnsmasq-dns-bb85b8995-fgcpw\" (UID: \"9d4d756f-d3c4-4fd5-a75e-0df5c33004fb\") " pod="openstack/dnsmasq-dns-bb85b8995-fgcpw" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.422680 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/9d4d756f-d3c4-4fd5-a75e-0df5c33004fb-openstack-edpm-ipam\") pod \"dnsmasq-dns-bb85b8995-fgcpw\" (UID: \"9d4d756f-d3c4-4fd5-a75e-0df5c33004fb\") " pod="openstack/dnsmasq-dns-bb85b8995-fgcpw" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.422772 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pwfq2\" (UniqueName: \"kubernetes.io/projected/9d4d756f-d3c4-4fd5-a75e-0df5c33004fb-kube-api-access-pwfq2\") pod \"dnsmasq-dns-bb85b8995-fgcpw\" (UID: \"9d4d756f-d3c4-4fd5-a75e-0df5c33004fb\") " pod="openstack/dnsmasq-dns-bb85b8995-fgcpw" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.422950 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9d4d756f-d3c4-4fd5-a75e-0df5c33004fb-dns-svc\") pod \"dnsmasq-dns-bb85b8995-fgcpw\" (UID: \"9d4d756f-d3c4-4fd5-a75e-0df5c33004fb\") " pod="openstack/dnsmasq-dns-bb85b8995-fgcpw" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.422996 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9d4d756f-d3c4-4fd5-a75e-0df5c33004fb-dns-swift-storage-0\") pod 
\"dnsmasq-dns-bb85b8995-fgcpw\" (UID: \"9d4d756f-d3c4-4fd5-a75e-0df5c33004fb\") " pod="openstack/dnsmasq-dns-bb85b8995-fgcpw" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.423326 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4d756f-d3c4-4fd5-a75e-0df5c33004fb-config\") pod \"dnsmasq-dns-bb85b8995-fgcpw\" (UID: \"9d4d756f-d3c4-4fd5-a75e-0df5c33004fb\") " pod="openstack/dnsmasq-dns-bb85b8995-fgcpw" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.424649 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/9d4d756f-d3c4-4fd5-a75e-0df5c33004fb-openstack-edpm-ipam\") pod \"dnsmasq-dns-bb85b8995-fgcpw\" (UID: \"9d4d756f-d3c4-4fd5-a75e-0df5c33004fb\") " pod="openstack/dnsmasq-dns-bb85b8995-fgcpw" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.425736 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9d4d756f-d3c4-4fd5-a75e-0df5c33004fb-ovsdbserver-nb\") pod \"dnsmasq-dns-bb85b8995-fgcpw\" (UID: \"9d4d756f-d3c4-4fd5-a75e-0df5c33004fb\") " pod="openstack/dnsmasq-dns-bb85b8995-fgcpw" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.426396 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9d4d756f-d3c4-4fd5-a75e-0df5c33004fb-dns-svc\") pod \"dnsmasq-dns-bb85b8995-fgcpw\" (UID: \"9d4d756f-d3c4-4fd5-a75e-0df5c33004fb\") " pod="openstack/dnsmasq-dns-bb85b8995-fgcpw" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.426675 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9d4d756f-d3c4-4fd5-a75e-0df5c33004fb-dns-swift-storage-0\") pod \"dnsmasq-dns-bb85b8995-fgcpw\" (UID: \"9d4d756f-d3c4-4fd5-a75e-0df5c33004fb\") " pod="openstack/dnsmasq-dns-bb85b8995-fgcpw" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.426752 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9d4d756f-d3c4-4fd5-a75e-0df5c33004fb-ovsdbserver-sb\") pod \"dnsmasq-dns-bb85b8995-fgcpw\" (UID: \"9d4d756f-d3c4-4fd5-a75e-0df5c33004fb\") " pod="openstack/dnsmasq-dns-bb85b8995-fgcpw" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.455861 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pwfq2\" (UniqueName: \"kubernetes.io/projected/9d4d756f-d3c4-4fd5-a75e-0df5c33004fb-kube-api-access-pwfq2\") pod \"dnsmasq-dns-bb85b8995-fgcpw\" (UID: \"9d4d756f-d3c4-4fd5-a75e-0df5c33004fb\") " pod="openstack/dnsmasq-dns-bb85b8995-fgcpw" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.509842 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bb85b8995-fgcpw" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.705545 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-79b5d74c8c-b7s2x" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.817582 4791 generic.go:334] "Generic (PLEG): container finished" podID="3d564d20-a048-4a0b-93c5-6b2b1dd278f8" containerID="ebf925a19e35e92570426048f941cfb36885dd9f889bcc8f6b01dbb33b9e9ff7" exitCode=0 Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.817623 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-b7s2x" event={"ID":"3d564d20-a048-4a0b-93c5-6b2b1dd278f8","Type":"ContainerDied","Data":"ebf925a19e35e92570426048f941cfb36885dd9f889bcc8f6b01dbb33b9e9ff7"} Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.817652 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-b7s2x" event={"ID":"3d564d20-a048-4a0b-93c5-6b2b1dd278f8","Type":"ContainerDied","Data":"1d7c4590afd53498f12e2db4667d2b7a8b30948e342666447b619a46e5e79c4a"} Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.817670 4791 scope.go:117] "RemoveContainer" containerID="ebf925a19e35e92570426048f941cfb36885dd9f889bcc8f6b01dbb33b9e9ff7" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.817844 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79b5d74c8c-b7s2x" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.832064 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d564d20-a048-4a0b-93c5-6b2b1dd278f8-config\") pod \"3d564d20-a048-4a0b-93c5-6b2b1dd278f8\" (UID: \"3d564d20-a048-4a0b-93c5-6b2b1dd278f8\") " Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.832241 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3d564d20-a048-4a0b-93c5-6b2b1dd278f8-dns-swift-storage-0\") pod \"3d564d20-a048-4a0b-93c5-6b2b1dd278f8\" (UID: \"3d564d20-a048-4a0b-93c5-6b2b1dd278f8\") " Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.832297 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3d564d20-a048-4a0b-93c5-6b2b1dd278f8-ovsdbserver-sb\") pod \"3d564d20-a048-4a0b-93c5-6b2b1dd278f8\" (UID: \"3d564d20-a048-4a0b-93c5-6b2b1dd278f8\") " Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.832341 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3d564d20-a048-4a0b-93c5-6b2b1dd278f8-ovsdbserver-nb\") pod \"3d564d20-a048-4a0b-93c5-6b2b1dd278f8\" (UID: \"3d564d20-a048-4a0b-93c5-6b2b1dd278f8\") " Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.832494 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fjt2w\" (UniqueName: \"kubernetes.io/projected/3d564d20-a048-4a0b-93c5-6b2b1dd278f8-kube-api-access-fjt2w\") pod \"3d564d20-a048-4a0b-93c5-6b2b1dd278f8\" (UID: \"3d564d20-a048-4a0b-93c5-6b2b1dd278f8\") " Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.832846 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d564d20-a048-4a0b-93c5-6b2b1dd278f8-dns-svc\") pod \"3d564d20-a048-4a0b-93c5-6b2b1dd278f8\" (UID: \"3d564d20-a048-4a0b-93c5-6b2b1dd278f8\") " Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.837716 4791 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d564d20-a048-4a0b-93c5-6b2b1dd278f8-kube-api-access-fjt2w" (OuterVolumeSpecName: "kube-api-access-fjt2w") pod "3d564d20-a048-4a0b-93c5-6b2b1dd278f8" (UID: "3d564d20-a048-4a0b-93c5-6b2b1dd278f8"). InnerVolumeSpecName "kube-api-access-fjt2w". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.846299 4791 scope.go:117] "RemoveContainer" containerID="2bf93004ddd7cc4f22bcb47d5e1cdf54a7eebad8eb0aeb1166edc2589c048aed" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.901632 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d564d20-a048-4a0b-93c5-6b2b1dd278f8-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "3d564d20-a048-4a0b-93c5-6b2b1dd278f8" (UID: "3d564d20-a048-4a0b-93c5-6b2b1dd278f8"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.909439 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d564d20-a048-4a0b-93c5-6b2b1dd278f8-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "3d564d20-a048-4a0b-93c5-6b2b1dd278f8" (UID: "3d564d20-a048-4a0b-93c5-6b2b1dd278f8"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.923418 4791 scope.go:117] "RemoveContainer" containerID="ebf925a19e35e92570426048f941cfb36885dd9f889bcc8f6b01dbb33b9e9ff7" Feb 18 01:01:20 crc kubenswrapper[4791]: E0218 01:01:20.923753 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ebf925a19e35e92570426048f941cfb36885dd9f889bcc8f6b01dbb33b9e9ff7\": container with ID starting with ebf925a19e35e92570426048f941cfb36885dd9f889bcc8f6b01dbb33b9e9ff7 not found: ID does not exist" containerID="ebf925a19e35e92570426048f941cfb36885dd9f889bcc8f6b01dbb33b9e9ff7" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.923786 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ebf925a19e35e92570426048f941cfb36885dd9f889bcc8f6b01dbb33b9e9ff7"} err="failed to get container status \"ebf925a19e35e92570426048f941cfb36885dd9f889bcc8f6b01dbb33b9e9ff7\": rpc error: code = NotFound desc = could not find container \"ebf925a19e35e92570426048f941cfb36885dd9f889bcc8f6b01dbb33b9e9ff7\": container with ID starting with ebf925a19e35e92570426048f941cfb36885dd9f889bcc8f6b01dbb33b9e9ff7 not found: ID does not exist" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.923806 4791 scope.go:117] "RemoveContainer" containerID="2bf93004ddd7cc4f22bcb47d5e1cdf54a7eebad8eb0aeb1166edc2589c048aed" Feb 18 01:01:20 crc kubenswrapper[4791]: E0218 01:01:20.923992 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2bf93004ddd7cc4f22bcb47d5e1cdf54a7eebad8eb0aeb1166edc2589c048aed\": container with ID starting with 2bf93004ddd7cc4f22bcb47d5e1cdf54a7eebad8eb0aeb1166edc2589c048aed not found: ID does not exist" containerID="2bf93004ddd7cc4f22bcb47d5e1cdf54a7eebad8eb0aeb1166edc2589c048aed" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.924015 4791 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"2bf93004ddd7cc4f22bcb47d5e1cdf54a7eebad8eb0aeb1166edc2589c048aed"} err="failed to get container status \"2bf93004ddd7cc4f22bcb47d5e1cdf54a7eebad8eb0aeb1166edc2589c048aed\": rpc error: code = NotFound desc = could not find container \"2bf93004ddd7cc4f22bcb47d5e1cdf54a7eebad8eb0aeb1166edc2589c048aed\": container with ID starting with 2bf93004ddd7cc4f22bcb47d5e1cdf54a7eebad8eb0aeb1166edc2589c048aed not found: ID does not exist" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.935922 4791 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3d564d20-a048-4a0b-93c5-6b2b1dd278f8-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.935956 4791 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3d564d20-a048-4a0b-93c5-6b2b1dd278f8-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.935966 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fjt2w\" (UniqueName: \"kubernetes.io/projected/3d564d20-a048-4a0b-93c5-6b2b1dd278f8-kube-api-access-fjt2w\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.964238 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d564d20-a048-4a0b-93c5-6b2b1dd278f8-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3d564d20-a048-4a0b-93c5-6b2b1dd278f8" (UID: "3d564d20-a048-4a0b-93c5-6b2b1dd278f8"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.970632 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d564d20-a048-4a0b-93c5-6b2b1dd278f8-config" (OuterVolumeSpecName: "config") pod "3d564d20-a048-4a0b-93c5-6b2b1dd278f8" (UID: "3d564d20-a048-4a0b-93c5-6b2b1dd278f8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 01:01:20 crc kubenswrapper[4791]: I0218 01:01:20.978946 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d564d20-a048-4a0b-93c5-6b2b1dd278f8-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "3d564d20-a048-4a0b-93c5-6b2b1dd278f8" (UID: "3d564d20-a048-4a0b-93c5-6b2b1dd278f8"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 01:01:21 crc kubenswrapper[4791]: I0218 01:01:21.037621 4791 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d564d20-a048-4a0b-93c5-6b2b1dd278f8-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:21 crc kubenswrapper[4791]: I0218 01:01:21.037655 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d564d20-a048-4a0b-93c5-6b2b1dd278f8-config\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:21 crc kubenswrapper[4791]: I0218 01:01:21.037665 4791 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3d564d20-a048-4a0b-93c5-6b2b1dd278f8-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:21 crc kubenswrapper[4791]: I0218 01:01:21.051602 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bb85b8995-fgcpw"] Feb 18 01:01:21 crc kubenswrapper[4791]: E0218 01:01:21.062829 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:01:21 crc kubenswrapper[4791]: I0218 01:01:21.300142 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-b7s2x"] Feb 18 01:01:21 crc kubenswrapper[4791]: I0218 01:01:21.310969 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-b7s2x"] Feb 18 01:01:21 crc kubenswrapper[4791]: I0218 01:01:21.833515 4791 generic.go:334] "Generic (PLEG): container finished" podID="9d4d756f-d3c4-4fd5-a75e-0df5c33004fb" containerID="75a0f1f176c0a4771f77f69995e0d22536b52559639596c326f88f490e30a7fb" exitCode=0 Feb 18 01:01:21 crc kubenswrapper[4791]: I0218 01:01:21.833557 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bb85b8995-fgcpw" event={"ID":"9d4d756f-d3c4-4fd5-a75e-0df5c33004fb","Type":"ContainerDied","Data":"75a0f1f176c0a4771f77f69995e0d22536b52559639596c326f88f490e30a7fb"} Feb 18 01:01:21 crc kubenswrapper[4791]: I0218 01:01:21.833580 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bb85b8995-fgcpw" event={"ID":"9d4d756f-d3c4-4fd5-a75e-0df5c33004fb","Type":"ContainerStarted","Data":"cc0507daf2a650b0cc9e62d081b5c13963cb73475ae8fb5e680ea0b85c1534ff"} Feb 18 01:01:22 crc kubenswrapper[4791]: I0218 01:01:22.849272 4791 generic.go:334] "Generic (PLEG): container finished" podID="82b5d66f-f75d-41d1-be23-9e6b60446ad5" containerID="dd8f0cb3bd8ebeac41acf4143937ae5931d38504296c21e1781a99e3a7c10d04" exitCode=0 Feb 18 01:01:22 crc kubenswrapper[4791]: I0218 01:01:22.849332 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"82b5d66f-f75d-41d1-be23-9e6b60446ad5","Type":"ContainerDied","Data":"dd8f0cb3bd8ebeac41acf4143937ae5931d38504296c21e1781a99e3a7c10d04"} Feb 18 01:01:22 crc kubenswrapper[4791]: I0218 01:01:22.853799 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bb85b8995-fgcpw" event={"ID":"9d4d756f-d3c4-4fd5-a75e-0df5c33004fb","Type":"ContainerStarted","Data":"2d58748444bd5955c3bc8ef26b39f6dc76a8048de73fbcae7c45a17d61dd5c8e"} Feb 18 01:01:22 crc kubenswrapper[4791]: I0218 01:01:22.854258 4791 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-bb85b8995-fgcpw" Feb 18 01:01:22 crc kubenswrapper[4791]: I0218 01:01:22.889954 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-bb85b8995-fgcpw" podStartSLOduration=2.889931728 podStartE2EDuration="2.889931728s" podCreationTimestamp="2026-02-18 01:01:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 01:01:22.875948486 +0000 UTC m=+1624.443961686" watchObservedRunningTime="2026-02-18 01:01:22.889931728 +0000 UTC m=+1624.457944908" Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.065585 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.080583 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d564d20-a048-4a0b-93c5-6b2b1dd278f8" path="/var/lib/kubelet/pods/3d564d20-a048-4a0b-93c5-6b2b1dd278f8/volumes" Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.081998 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82b5d66f-f75d-41d1-be23-9e6b60446ad5-config-data\") pod \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\" (UID: \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\") " Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.082057 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82b5d66f-f75d-41d1-be23-9e6b60446ad5-log-httpd\") pod \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\" (UID: \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\") " Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.082143 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/82b5d66f-f75d-41d1-be23-9e6b60446ad5-sg-core-conf-yaml\") pod \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\" (UID: \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\") " Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.082230 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82b5d66f-f75d-41d1-be23-9e6b60446ad5-combined-ca-bundle\") pod \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\" (UID: \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\") " Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.082297 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z4zt9\" (UniqueName: \"kubernetes.io/projected/82b5d66f-f75d-41d1-be23-9e6b60446ad5-kube-api-access-z4zt9\") pod \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\" (UID: \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\") " Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.082379 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/82b5d66f-f75d-41d1-be23-9e6b60446ad5-scripts\") pod \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\" (UID: \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\") " Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.082444 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/82b5d66f-f75d-41d1-be23-9e6b60446ad5-ceilometer-tls-certs\") pod \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\" (UID: \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\") 
" Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.082485 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82b5d66f-f75d-41d1-be23-9e6b60446ad5-run-httpd\") pod \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\" (UID: \"82b5d66f-f75d-41d1-be23-9e6b60446ad5\") " Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.082980 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/82b5d66f-f75d-41d1-be23-9e6b60446ad5-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "82b5d66f-f75d-41d1-be23-9e6b60446ad5" (UID: "82b5d66f-f75d-41d1-be23-9e6b60446ad5"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.084094 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/82b5d66f-f75d-41d1-be23-9e6b60446ad5-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "82b5d66f-f75d-41d1-be23-9e6b60446ad5" (UID: "82b5d66f-f75d-41d1-be23-9e6b60446ad5"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.088406 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82b5d66f-f75d-41d1-be23-9e6b60446ad5-kube-api-access-z4zt9" (OuterVolumeSpecName: "kube-api-access-z4zt9") pod "82b5d66f-f75d-41d1-be23-9e6b60446ad5" (UID: "82b5d66f-f75d-41d1-be23-9e6b60446ad5"). InnerVolumeSpecName "kube-api-access-z4zt9". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.095554 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82b5d66f-f75d-41d1-be23-9e6b60446ad5-scripts" (OuterVolumeSpecName: "scripts") pod "82b5d66f-f75d-41d1-be23-9e6b60446ad5" (UID: "82b5d66f-f75d-41d1-be23-9e6b60446ad5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.160546 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82b5d66f-f75d-41d1-be23-9e6b60446ad5-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "82b5d66f-f75d-41d1-be23-9e6b60446ad5" (UID: "82b5d66f-f75d-41d1-be23-9e6b60446ad5"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.186006 4791 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82b5d66f-f75d-41d1-be23-9e6b60446ad5-log-httpd\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.186038 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z4zt9\" (UniqueName: \"kubernetes.io/projected/82b5d66f-f75d-41d1-be23-9e6b60446ad5-kube-api-access-z4zt9\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.186050 4791 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/82b5d66f-f75d-41d1-be23-9e6b60446ad5-scripts\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.186058 4791 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/82b5d66f-f75d-41d1-be23-9e6b60446ad5-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.186069 4791 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82b5d66f-f75d-41d1-be23-9e6b60446ad5-run-httpd\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.189913 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82b5d66f-f75d-41d1-be23-9e6b60446ad5-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "82b5d66f-f75d-41d1-be23-9e6b60446ad5" (UID: "82b5d66f-f75d-41d1-be23-9e6b60446ad5"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.213342 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82b5d66f-f75d-41d1-be23-9e6b60446ad5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "82b5d66f-f75d-41d1-be23-9e6b60446ad5" (UID: "82b5d66f-f75d-41d1-be23-9e6b60446ad5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.253265 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82b5d66f-f75d-41d1-be23-9e6b60446ad5-config-data" (OuterVolumeSpecName: "config-data") pod "82b5d66f-f75d-41d1-be23-9e6b60446ad5" (UID: "82b5d66f-f75d-41d1-be23-9e6b60446ad5"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.288268 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82b5d66f-f75d-41d1-be23-9e6b60446ad5-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.288487 4791 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/82b5d66f-f75d-41d1-be23-9e6b60446ad5-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.288577 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82b5d66f-f75d-41d1-be23-9e6b60446ad5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.866265 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.866257 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"82b5d66f-f75d-41d1-be23-9e6b60446ad5","Type":"ContainerDied","Data":"f872ba81222a223199ae6dfc7eb01c197de1a86e4261e7c28eb0e6276d3076fb"} Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.866819 4791 scope.go:117] "RemoveContainer" containerID="c335bbce2638ec0ff3015dff9faf44c5f1d4d1362de8a1ead007ead2a8c2cafe" Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.904095 4791 scope.go:117] "RemoveContainer" containerID="99f5b0bcd78611195226e82a4917bf1ec49ac3467917762b14aeb02397478504" Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.916589 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.935181 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.980923 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Feb 18 01:01:23 crc kubenswrapper[4791]: E0218 01:01:23.981515 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82b5d66f-f75d-41d1-be23-9e6b60446ad5" containerName="sg-core" Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.981540 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="82b5d66f-f75d-41d1-be23-9e6b60446ad5" containerName="sg-core" Feb 18 01:01:23 crc kubenswrapper[4791]: E0218 01:01:23.981554 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82b5d66f-f75d-41d1-be23-9e6b60446ad5" containerName="ceilometer-notification-agent" Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.981564 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="82b5d66f-f75d-41d1-be23-9e6b60446ad5" containerName="ceilometer-notification-agent" Feb 18 01:01:23 crc kubenswrapper[4791]: E0218 01:01:23.981581 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82b5d66f-f75d-41d1-be23-9e6b60446ad5" containerName="ceilometer-central-agent" Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.981590 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="82b5d66f-f75d-41d1-be23-9e6b60446ad5" containerName="ceilometer-central-agent" Feb 18 01:01:23 crc kubenswrapper[4791]: E0218 01:01:23.981607 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d564d20-a048-4a0b-93c5-6b2b1dd278f8" containerName="init" Feb 18 
01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.981615 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d564d20-a048-4a0b-93c5-6b2b1dd278f8" containerName="init" Feb 18 01:01:23 crc kubenswrapper[4791]: E0218 01:01:23.981659 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d564d20-a048-4a0b-93c5-6b2b1dd278f8" containerName="dnsmasq-dns" Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.981681 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d564d20-a048-4a0b-93c5-6b2b1dd278f8" containerName="dnsmasq-dns" Feb 18 01:01:23 crc kubenswrapper[4791]: E0218 01:01:23.981704 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82b5d66f-f75d-41d1-be23-9e6b60446ad5" containerName="proxy-httpd" Feb 18 01:01:23 crc kubenswrapper[4791]: I0218 01:01:23.981714 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="82b5d66f-f75d-41d1-be23-9e6b60446ad5" containerName="proxy-httpd" Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.018651 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="82b5d66f-f75d-41d1-be23-9e6b60446ad5" containerName="sg-core" Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.018799 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="82b5d66f-f75d-41d1-be23-9e6b60446ad5" containerName="proxy-httpd" Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.018847 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d564d20-a048-4a0b-93c5-6b2b1dd278f8" containerName="dnsmasq-dns" Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.018878 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="82b5d66f-f75d-41d1-be23-9e6b60446ad5" containerName="ceilometer-notification-agent" Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.018905 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="82b5d66f-f75d-41d1-be23-9e6b60446ad5" containerName="ceilometer-central-agent" Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.020879 4791 scope.go:117] "RemoveContainer" containerID="dd8f0cb3bd8ebeac41acf4143937ae5931d38504296c21e1781a99e3a7c10d04" Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.028555 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.028651 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.030799 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.030961 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.031020 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.070061 4791 scope.go:117] "RemoveContainer" containerID="645d82e19cf67227a6cc3840d9814a8717abac72619fae8e06c113395b996af2" Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.120176 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4b9cec47-aeda-40f0-b83e-46f09ce65e95-run-httpd\") pod \"ceilometer-0\" (UID: \"4b9cec47-aeda-40f0-b83e-46f09ce65e95\") " pod="openstack/ceilometer-0" Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.120228 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4b9cec47-aeda-40f0-b83e-46f09ce65e95-scripts\") pod \"ceilometer-0\" (UID: \"4b9cec47-aeda-40f0-b83e-46f09ce65e95\") " pod="openstack/ceilometer-0" Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.120363 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4b9cec47-aeda-40f0-b83e-46f09ce65e95-log-httpd\") pod \"ceilometer-0\" (UID: \"4b9cec47-aeda-40f0-b83e-46f09ce65e95\") " pod="openstack/ceilometer-0" Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.120820 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4b9cec47-aeda-40f0-b83e-46f09ce65e95-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4b9cec47-aeda-40f0-b83e-46f09ce65e95\") " pod="openstack/ceilometer-0" Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.120872 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-np7gr\" (UniqueName: \"kubernetes.io/projected/4b9cec47-aeda-40f0-b83e-46f09ce65e95-kube-api-access-np7gr\") pod \"ceilometer-0\" (UID: \"4b9cec47-aeda-40f0-b83e-46f09ce65e95\") " pod="openstack/ceilometer-0" Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.120935 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b9cec47-aeda-40f0-b83e-46f09ce65e95-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4b9cec47-aeda-40f0-b83e-46f09ce65e95\") " pod="openstack/ceilometer-0" Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.121040 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b9cec47-aeda-40f0-b83e-46f09ce65e95-config-data\") pod \"ceilometer-0\" (UID: \"4b9cec47-aeda-40f0-b83e-46f09ce65e95\") " pod="openstack/ceilometer-0" Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.121927 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/4b9cec47-aeda-40f0-b83e-46f09ce65e95-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"4b9cec47-aeda-40f0-b83e-46f09ce65e95\") " pod="openstack/ceilometer-0" Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.223487 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b9cec47-aeda-40f0-b83e-46f09ce65e95-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"4b9cec47-aeda-40f0-b83e-46f09ce65e95\") " pod="openstack/ceilometer-0" Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.223605 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4b9cec47-aeda-40f0-b83e-46f09ce65e95-run-httpd\") pod \"ceilometer-0\" (UID: \"4b9cec47-aeda-40f0-b83e-46f09ce65e95\") " pod="openstack/ceilometer-0" Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.223629 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4b9cec47-aeda-40f0-b83e-46f09ce65e95-scripts\") pod \"ceilometer-0\" (UID: \"4b9cec47-aeda-40f0-b83e-46f09ce65e95\") " pod="openstack/ceilometer-0" Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.223720 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4b9cec47-aeda-40f0-b83e-46f09ce65e95-log-httpd\") pod \"ceilometer-0\" (UID: \"4b9cec47-aeda-40f0-b83e-46f09ce65e95\") " pod="openstack/ceilometer-0" Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.223743 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4b9cec47-aeda-40f0-b83e-46f09ce65e95-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4b9cec47-aeda-40f0-b83e-46f09ce65e95\") " pod="openstack/ceilometer-0" Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.224014 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4b9cec47-aeda-40f0-b83e-46f09ce65e95-run-httpd\") pod \"ceilometer-0\" (UID: \"4b9cec47-aeda-40f0-b83e-46f09ce65e95\") " pod="openstack/ceilometer-0" Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.224060 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-np7gr\" (UniqueName: \"kubernetes.io/projected/4b9cec47-aeda-40f0-b83e-46f09ce65e95-kube-api-access-np7gr\") pod \"ceilometer-0\" (UID: \"4b9cec47-aeda-40f0-b83e-46f09ce65e95\") " pod="openstack/ceilometer-0" Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.224370 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4b9cec47-aeda-40f0-b83e-46f09ce65e95-log-httpd\") pod \"ceilometer-0\" (UID: \"4b9cec47-aeda-40f0-b83e-46f09ce65e95\") " pod="openstack/ceilometer-0" Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.224386 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b9cec47-aeda-40f0-b83e-46f09ce65e95-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4b9cec47-aeda-40f0-b83e-46f09ce65e95\") " pod="openstack/ceilometer-0" Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.224609 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/4b9cec47-aeda-40f0-b83e-46f09ce65e95-config-data\") pod \"ceilometer-0\" (UID: \"4b9cec47-aeda-40f0-b83e-46f09ce65e95\") " pod="openstack/ceilometer-0" Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.227921 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4b9cec47-aeda-40f0-b83e-46f09ce65e95-scripts\") pod \"ceilometer-0\" (UID: \"4b9cec47-aeda-40f0-b83e-46f09ce65e95\") " pod="openstack/ceilometer-0" Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.228490 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4b9cec47-aeda-40f0-b83e-46f09ce65e95-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4b9cec47-aeda-40f0-b83e-46f09ce65e95\") " pod="openstack/ceilometer-0" Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.228574 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b9cec47-aeda-40f0-b83e-46f09ce65e95-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"4b9cec47-aeda-40f0-b83e-46f09ce65e95\") " pod="openstack/ceilometer-0" Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.229683 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b9cec47-aeda-40f0-b83e-46f09ce65e95-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4b9cec47-aeda-40f0-b83e-46f09ce65e95\") " pod="openstack/ceilometer-0" Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.245044 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b9cec47-aeda-40f0-b83e-46f09ce65e95-config-data\") pod \"ceilometer-0\" (UID: \"4b9cec47-aeda-40f0-b83e-46f09ce65e95\") " pod="openstack/ceilometer-0" Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.254731 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-np7gr\" (UniqueName: \"kubernetes.io/projected/4b9cec47-aeda-40f0-b83e-46f09ce65e95-kube-api-access-np7gr\") pod \"ceilometer-0\" (UID: \"4b9cec47-aeda-40f0-b83e-46f09ce65e95\") " pod="openstack/ceilometer-0" Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.346345 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.878224 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Feb 18 01:01:24 crc kubenswrapper[4791]: I0218 01:01:24.879699 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4b9cec47-aeda-40f0-b83e-46f09ce65e95","Type":"ContainerStarted","Data":"2b2a15096af9943f505e744901e1ef4610855942a036fb66406a1f42fcf44f97"} Feb 18 01:01:25 crc kubenswrapper[4791]: E0218 01:01:25.021821 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 01:01:25 crc kubenswrapper[4791]: E0218 01:01:25.021889 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 01:01:25 crc kubenswrapper[4791]: E0218 01:01:25.022038 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndh699h566hcch79h59h595h58bhb6h7fh594h67fh546h668h56hc8h6chd5h94h599h587hddh64bh57bhc9h68fh7dh57bh65dh64fh68dh59dq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-np7gr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(4b9cec47-aeda-40f0-b83e-46f09ce65e95): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Feb 18 01:01:25 crc kubenswrapper[4791]: I0218 01:01:25.075087 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82b5d66f-f75d-41d1-be23-9e6b60446ad5" path="/var/lib/kubelet/pods/82b5d66f-f75d-41d1-be23-9e6b60446ad5/volumes" Feb 18 01:01:25 crc kubenswrapper[4791]: I0218 01:01:25.421919 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-79b5d74c8c-b7s2x" podUID="3d564d20-a048-4a0b-93c5-6b2b1dd278f8" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.1.4:5353: i/o timeout" Feb 18 01:01:25 crc kubenswrapper[4791]: I0218 01:01:25.890971 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4b9cec47-aeda-40f0-b83e-46f09ce65e95","Type":"ContainerStarted","Data":"71771acf0ea4ca4c403e839112f4bc6155cde8d6119d2d86b2e2a43de6c2bccb"} Feb 18 01:01:26 crc kubenswrapper[4791]: I0218 01:01:26.800097 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 01:01:26 crc kubenswrapper[4791]: I0218 01:01:26.800409 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 01:01:26 crc kubenswrapper[4791]: I0218 01:01:26.909670 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4b9cec47-aeda-40f0-b83e-46f09ce65e95","Type":"ContainerStarted","Data":"3fda689c1f6971d366dc3a78cb557883fcbfab985a3ec90995e69f39619447ce"} Feb 18 01:01:29 crc kubenswrapper[4791]: I0218 01:01:29.949370 4791 generic.go:334] "Generic (PLEG): container finished" podID="348a7b7d-959f-4e3e-b40f-d39facc48df0" containerID="7e3e523ed11dc8c54b2a957393eba3c4b2bed1e10a28229772ae43db02ebea5e" exitCode=0 Feb 18 01:01:29 crc kubenswrapper[4791]: I0218 01:01:29.949565 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c82zr" event={"ID":"348a7b7d-959f-4e3e-b40f-d39facc48df0","Type":"ContainerDied","Data":"7e3e523ed11dc8c54b2a957393eba3c4b2bed1e10a28229772ae43db02ebea5e"} Feb 18 01:01:30 crc kubenswrapper[4791]: I0218 01:01:30.512366 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-bb85b8995-fgcpw" Feb 18 01:01:30 crc kubenswrapper[4791]: I0218 01:01:30.588292 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-jsklv"] Feb 18 01:01:30 crc kubenswrapper[4791]: I0218 01:01:30.588532 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-68df85789f-jsklv" podUID="151ecafd-43d4-496a-9262-c68d93bed5de" containerName="dnsmasq-dns" containerID="cri-o://f929872188aa00e818eba252ba21aa45859f51c5b860d778f587176d63ed6434" gracePeriod=10 Feb 18 01:01:30 crc kubenswrapper[4791]: I0218 01:01:30.975404 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c82zr" 
event={"ID":"348a7b7d-959f-4e3e-b40f-d39facc48df0","Type":"ContainerStarted","Data":"34a82aeeb646dda1292f4b35e74c6ddf56bc97f4636ce7ad1ebea46e75aa2828"} Feb 18 01:01:30 crc kubenswrapper[4791]: I0218 01:01:30.983997 4791 generic.go:334] "Generic (PLEG): container finished" podID="151ecafd-43d4-496a-9262-c68d93bed5de" containerID="f929872188aa00e818eba252ba21aa45859f51c5b860d778f587176d63ed6434" exitCode=0 Feb 18 01:01:30 crc kubenswrapper[4791]: I0218 01:01:30.984032 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68df85789f-jsklv" event={"ID":"151ecafd-43d4-496a-9262-c68d93bed5de","Type":"ContainerDied","Data":"f929872188aa00e818eba252ba21aa45859f51c5b860d778f587176d63ed6434"} Feb 18 01:01:31 crc kubenswrapper[4791]: I0218 01:01:31.001632 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-c82zr" podStartSLOduration=3.262717936 podStartE2EDuration="1m31.001611915s" podCreationTimestamp="2026-02-18 01:00:00 +0000 UTC" firstStartedPulling="2026-02-18 01:00:02.625992273 +0000 UTC m=+1544.194005443" lastFinishedPulling="2026-02-18 01:01:30.364886222 +0000 UTC m=+1631.932899422" observedRunningTime="2026-02-18 01:01:30.992012538 +0000 UTC m=+1632.560025708" watchObservedRunningTime="2026-02-18 01:01:31.001611915 +0000 UTC m=+1632.569625085" Feb 18 01:01:31 crc kubenswrapper[4791]: I0218 01:01:31.176421 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-c82zr" Feb 18 01:01:31 crc kubenswrapper[4791]: I0218 01:01:31.181038 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-c82zr" Feb 18 01:01:31 crc kubenswrapper[4791]: I0218 01:01:31.232593 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-68df85789f-jsklv" Feb 18 01:01:31 crc kubenswrapper[4791]: I0218 01:01:31.357357 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k8zqb\" (UniqueName: \"kubernetes.io/projected/151ecafd-43d4-496a-9262-c68d93bed5de-kube-api-access-k8zqb\") pod \"151ecafd-43d4-496a-9262-c68d93bed5de\" (UID: \"151ecafd-43d4-496a-9262-c68d93bed5de\") " Feb 18 01:01:31 crc kubenswrapper[4791]: I0218 01:01:31.357453 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/151ecafd-43d4-496a-9262-c68d93bed5de-dns-svc\") pod \"151ecafd-43d4-496a-9262-c68d93bed5de\" (UID: \"151ecafd-43d4-496a-9262-c68d93bed5de\") " Feb 18 01:01:31 crc kubenswrapper[4791]: I0218 01:01:31.357486 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/151ecafd-43d4-496a-9262-c68d93bed5de-config\") pod \"151ecafd-43d4-496a-9262-c68d93bed5de\" (UID: \"151ecafd-43d4-496a-9262-c68d93bed5de\") " Feb 18 01:01:31 crc kubenswrapper[4791]: I0218 01:01:31.357519 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/151ecafd-43d4-496a-9262-c68d93bed5de-ovsdbserver-nb\") pod \"151ecafd-43d4-496a-9262-c68d93bed5de\" (UID: \"151ecafd-43d4-496a-9262-c68d93bed5de\") " Feb 18 01:01:31 crc kubenswrapper[4791]: I0218 01:01:31.357573 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/151ecafd-43d4-496a-9262-c68d93bed5de-dns-swift-storage-0\") pod \"151ecafd-43d4-496a-9262-c68d93bed5de\" (UID: \"151ecafd-43d4-496a-9262-c68d93bed5de\") " Feb 18 01:01:31 crc kubenswrapper[4791]: I0218 01:01:31.357722 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/151ecafd-43d4-496a-9262-c68d93bed5de-ovsdbserver-sb\") pod \"151ecafd-43d4-496a-9262-c68d93bed5de\" (UID: \"151ecafd-43d4-496a-9262-c68d93bed5de\") " Feb 18 01:01:31 crc kubenswrapper[4791]: I0218 01:01:31.357740 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/151ecafd-43d4-496a-9262-c68d93bed5de-openstack-edpm-ipam\") pod \"151ecafd-43d4-496a-9262-c68d93bed5de\" (UID: \"151ecafd-43d4-496a-9262-c68d93bed5de\") " Feb 18 01:01:31 crc kubenswrapper[4791]: I0218 01:01:31.388274 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/151ecafd-43d4-496a-9262-c68d93bed5de-kube-api-access-k8zqb" (OuterVolumeSpecName: "kube-api-access-k8zqb") pod "151ecafd-43d4-496a-9262-c68d93bed5de" (UID: "151ecafd-43d4-496a-9262-c68d93bed5de"). InnerVolumeSpecName "kube-api-access-k8zqb". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:01:31 crc kubenswrapper[4791]: I0218 01:01:31.425845 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/151ecafd-43d4-496a-9262-c68d93bed5de-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "151ecafd-43d4-496a-9262-c68d93bed5de" (UID: "151ecafd-43d4-496a-9262-c68d93bed5de"). InnerVolumeSpecName "openstack-edpm-ipam". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 01:01:31 crc kubenswrapper[4791]: I0218 01:01:31.433191 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/151ecafd-43d4-496a-9262-c68d93bed5de-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "151ecafd-43d4-496a-9262-c68d93bed5de" (UID: "151ecafd-43d4-496a-9262-c68d93bed5de"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 01:01:31 crc kubenswrapper[4791]: I0218 01:01:31.454967 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/151ecafd-43d4-496a-9262-c68d93bed5de-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "151ecafd-43d4-496a-9262-c68d93bed5de" (UID: "151ecafd-43d4-496a-9262-c68d93bed5de"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 01:01:31 crc kubenswrapper[4791]: I0218 01:01:31.457606 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/151ecafd-43d4-496a-9262-c68d93bed5de-config" (OuterVolumeSpecName: "config") pod "151ecafd-43d4-496a-9262-c68d93bed5de" (UID: "151ecafd-43d4-496a-9262-c68d93bed5de"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 01:01:31 crc kubenswrapper[4791]: I0218 01:01:31.460655 4791 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/151ecafd-43d4-496a-9262-c68d93bed5de-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:31 crc kubenswrapper[4791]: I0218 01:01:31.460676 4791 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/151ecafd-43d4-496a-9262-c68d93bed5de-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:31 crc kubenswrapper[4791]: I0218 01:01:31.460685 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k8zqb\" (UniqueName: \"kubernetes.io/projected/151ecafd-43d4-496a-9262-c68d93bed5de-kube-api-access-k8zqb\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:31 crc kubenswrapper[4791]: I0218 01:01:31.460695 4791 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/151ecafd-43d4-496a-9262-c68d93bed5de-config\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:31 crc kubenswrapper[4791]: I0218 01:01:31.460704 4791 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/151ecafd-43d4-496a-9262-c68d93bed5de-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:31 crc kubenswrapper[4791]: I0218 01:01:31.463236 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/151ecafd-43d4-496a-9262-c68d93bed5de-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "151ecafd-43d4-496a-9262-c68d93bed5de" (UID: "151ecafd-43d4-496a-9262-c68d93bed5de"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 01:01:31 crc kubenswrapper[4791]: I0218 01:01:31.474238 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/151ecafd-43d4-496a-9262-c68d93bed5de-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "151ecafd-43d4-496a-9262-c68d93bed5de" (UID: "151ecafd-43d4-496a-9262-c68d93bed5de"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 01:01:31 crc kubenswrapper[4791]: I0218 01:01:31.563485 4791 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/151ecafd-43d4-496a-9262-c68d93bed5de-dns-svc\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:31 crc kubenswrapper[4791]: I0218 01:01:31.563519 4791 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/151ecafd-43d4-496a-9262-c68d93bed5de-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:31 crc kubenswrapper[4791]: I0218 01:01:31.997144 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68df85789f-jsklv" event={"ID":"151ecafd-43d4-496a-9262-c68d93bed5de","Type":"ContainerDied","Data":"eb653deb940c56484d1ebaa04b3c0ed84c0f775f9aeacca1bce9272a2ee20b76"} Feb 18 01:01:31 crc kubenswrapper[4791]: I0218 01:01:31.997205 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68df85789f-jsklv" Feb 18 01:01:31 crc kubenswrapper[4791]: I0218 01:01:31.997240 4791 scope.go:117] "RemoveContainer" containerID="f929872188aa00e818eba252ba21aa45859f51c5b860d778f587176d63ed6434" Feb 18 01:01:32 crc kubenswrapper[4791]: I0218 01:01:32.023878 4791 scope.go:117] "RemoveContainer" containerID="131777a9578c4fab6926b89e0fe2c148adde9307b5d0419e64798a86ca8c6f81" Feb 18 01:01:32 crc kubenswrapper[4791]: I0218 01:01:32.031108 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-jsklv"] Feb 18 01:01:32 crc kubenswrapper[4791]: I0218 01:01:32.042101 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-jsklv"] Feb 18 01:01:32 crc kubenswrapper[4791]: I0218 01:01:32.240038 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-c82zr" podUID="348a7b7d-959f-4e3e-b40f-d39facc48df0" containerName="registry-server" probeResult="failure" output=< Feb 18 01:01:32 crc kubenswrapper[4791]: timeout: failed to connect service ":50051" within 1s Feb 18 01:01:32 crc kubenswrapper[4791]: > Feb 18 01:01:33 crc kubenswrapper[4791]: I0218 01:01:33.087086 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="151ecafd-43d4-496a-9262-c68d93bed5de" path="/var/lib/kubelet/pods/151ecafd-43d4-496a-9262-c68d93bed5de/volumes" Feb 18 01:01:33 crc kubenswrapper[4791]: E0218 01:01:33.168630 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 01:01:33 crc kubenswrapper[4791]: E0218 01:01:33.168682 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 01:01:33 crc kubenswrapper[4791]: E0218 01:01:33.168804 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lgn7d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-scphk_openstack(68e5e8d6-5771-4045-858a-4a39b2db99f9): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 18 01:01:33 crc kubenswrapper[4791]: E0218 01:01:33.169962 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:01:39 crc kubenswrapper[4791]: E0218 01:01:39.081470 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:01:39 crc kubenswrapper[4791]: I0218 01:01:39.105318 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4b9cec47-aeda-40f0-b83e-46f09ce65e95","Type":"ContainerStarted","Data":"e870558cce2f06464fc280e8e1e3354283f88405fb1a39411c9105e0e5ad360b"} Feb 18 01:01:39 crc kubenswrapper[4791]: I0218 01:01:39.105375 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Feb 18 01:01:39 crc kubenswrapper[4791]: E0218 01:01:39.214955 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 01:01:39 crc kubenswrapper[4791]: E0218 01:01:39.215265 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 01:01:39 crc kubenswrapper[4791]: E0218 01:01:39.215433 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndh699h566hcch79h59h595h58bhb6h7fh594h67fh546h668h56hc8h6chd5h94h599h587hddh64bh57bhc9h68fh7dh57bh65dh64fh68dh59dq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-np7gr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(4b9cec47-aeda-40f0-b83e-46f09ce65e95): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 18 01:01:39 crc kubenswrapper[4791]: E0218 01:01:39.216794 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:01:40 crc kubenswrapper[4791]: E0218 01:01:40.106414 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:01:41 crc kubenswrapper[4791]: I0218 01:01:41.327646 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-c82zr" Feb 18 01:01:41 crc kubenswrapper[4791]: I0218 01:01:41.377179 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-c82zr" Feb 18 01:01:41 crc kubenswrapper[4791]: I0218 01:01:41.577651 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-c82zr"] Feb 18 01:01:43 crc kubenswrapper[4791]: I0218 01:01:43.140530 4791 generic.go:334] "Generic (PLEG): container finished" podID="37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d" containerID="6ff7541f29295439ea21aa0c4cd09b78358786b4fb1890cbb34d459fb5cb8bab" exitCode=0 Feb 18 01:01:43 crc kubenswrapper[4791]: I0218 01:01:43.140599 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d","Type":"ContainerDied","Data":"6ff7541f29295439ea21aa0c4cd09b78358786b4fb1890cbb34d459fb5cb8bab"} Feb 18 01:01:43 crc kubenswrapper[4791]: I0218 01:01:43.141100 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-c82zr" podUID="348a7b7d-959f-4e3e-b40f-d39facc48df0" containerName="registry-server" containerID="cri-o://34a82aeeb646dda1292f4b35e74c6ddf56bc97f4636ce7ad1ebea46e75aa2828" gracePeriod=2 Feb 18 01:01:43 crc kubenswrapper[4791]: I0218 01:01:43.747339 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-c82zr" Feb 18 01:01:43 crc kubenswrapper[4791]: I0218 01:01:43.881085 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-772gx\" (UniqueName: \"kubernetes.io/projected/348a7b7d-959f-4e3e-b40f-d39facc48df0-kube-api-access-772gx\") pod \"348a7b7d-959f-4e3e-b40f-d39facc48df0\" (UID: \"348a7b7d-959f-4e3e-b40f-d39facc48df0\") " Feb 18 01:01:43 crc kubenswrapper[4791]: I0218 01:01:43.881322 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/348a7b7d-959f-4e3e-b40f-d39facc48df0-utilities\") pod \"348a7b7d-959f-4e3e-b40f-d39facc48df0\" (UID: \"348a7b7d-959f-4e3e-b40f-d39facc48df0\") " Feb 18 01:01:43 crc kubenswrapper[4791]: I0218 01:01:43.881433 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/348a7b7d-959f-4e3e-b40f-d39facc48df0-catalog-content\") pod \"348a7b7d-959f-4e3e-b40f-d39facc48df0\" (UID: \"348a7b7d-959f-4e3e-b40f-d39facc48df0\") " Feb 18 01:01:43 crc kubenswrapper[4791]: I0218 01:01:43.882340 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/348a7b7d-959f-4e3e-b40f-d39facc48df0-utilities" (OuterVolumeSpecName: "utilities") pod "348a7b7d-959f-4e3e-b40f-d39facc48df0" (UID: "348a7b7d-959f-4e3e-b40f-d39facc48df0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:01:43 crc kubenswrapper[4791]: I0218 01:01:43.885929 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/348a7b7d-959f-4e3e-b40f-d39facc48df0-kube-api-access-772gx" (OuterVolumeSpecName: "kube-api-access-772gx") pod "348a7b7d-959f-4e3e-b40f-d39facc48df0" (UID: "348a7b7d-959f-4e3e-b40f-d39facc48df0"). InnerVolumeSpecName "kube-api-access-772gx". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:01:43 crc kubenswrapper[4791]: I0218 01:01:43.904647 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/348a7b7d-959f-4e3e-b40f-d39facc48df0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "348a7b7d-959f-4e3e-b40f-d39facc48df0" (UID: "348a7b7d-959f-4e3e-b40f-d39facc48df0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:01:43 crc kubenswrapper[4791]: I0218 01:01:43.984675 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/348a7b7d-959f-4e3e-b40f-d39facc48df0-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:43 crc kubenswrapper[4791]: I0218 01:01:43.984719 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/348a7b7d-959f-4e3e-b40f-d39facc48df0-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:43 crc kubenswrapper[4791]: I0218 01:01:43.984734 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-772gx\" (UniqueName: \"kubernetes.io/projected/348a7b7d-959f-4e3e-b40f-d39facc48df0-kube-api-access-772gx\") on node \"crc\" DevicePath \"\"" Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.159527 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d","Type":"ContainerStarted","Data":"c66c37f358f254bff698380617522248f3da44406ae117b097e93d7805193560"} Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.161287 4791 generic.go:334] "Generic (PLEG): container finished" podID="96310c28-c67e-463a-ab1e-beb273a7434e" containerID="f273a5cb3268e37795808a7e82988ecd620898d98aac288b85382da59c8e828d" exitCode=0 Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.161359 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"96310c28-c67e-463a-ab1e-beb273a7434e","Type":"ContainerDied","Data":"f273a5cb3268e37795808a7e82988ecd620898d98aac288b85382da59c8e828d"} Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.164944 4791 generic.go:334] "Generic (PLEG): container finished" podID="348a7b7d-959f-4e3e-b40f-d39facc48df0" containerID="34a82aeeb646dda1292f4b35e74c6ddf56bc97f4636ce7ad1ebea46e75aa2828" exitCode=0 Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.164973 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c82zr" event={"ID":"348a7b7d-959f-4e3e-b40f-d39facc48df0","Type":"ContainerDied","Data":"34a82aeeb646dda1292f4b35e74c6ddf56bc97f4636ce7ad1ebea46e75aa2828"} Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.164992 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c82zr" event={"ID":"348a7b7d-959f-4e3e-b40f-d39facc48df0","Type":"ContainerDied","Data":"7d07d291dccd195dc0de230caa77eee2b900059f51b1cd8b0a1031ddbcc57b07"} Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.165021 4791 scope.go:117] "RemoveContainer" containerID="34a82aeeb646dda1292f4b35e74c6ddf56bc97f4636ce7ad1ebea46e75aa2828" Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.165056 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-c82zr" Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.203202 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-2" podStartSLOduration=38.203146688 podStartE2EDuration="38.203146688s" podCreationTimestamp="2026-02-18 01:01:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 01:01:44.185181501 +0000 UTC m=+1645.753194671" watchObservedRunningTime="2026-02-18 01:01:44.203146688 +0000 UTC m=+1645.771159898" Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.318180 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-c82zr"] Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.322449 4791 scope.go:117] "RemoveContainer" containerID="7e3e523ed11dc8c54b2a957393eba3c4b2bed1e10a28229772ae43db02ebea5e" Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.327512 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-c82zr"] Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.369270 4791 scope.go:117] "RemoveContainer" containerID="879e7ab83fde88328d4d27ced33732acf56efa4db37d8968c680597c63fb9475" Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.442970 4791 scope.go:117] "RemoveContainer" containerID="34a82aeeb646dda1292f4b35e74c6ddf56bc97f4636ce7ad1ebea46e75aa2828" Feb 18 01:01:44 crc kubenswrapper[4791]: E0218 01:01:44.443392 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"34a82aeeb646dda1292f4b35e74c6ddf56bc97f4636ce7ad1ebea46e75aa2828\": container with ID starting with 34a82aeeb646dda1292f4b35e74c6ddf56bc97f4636ce7ad1ebea46e75aa2828 not found: ID does not exist" containerID="34a82aeeb646dda1292f4b35e74c6ddf56bc97f4636ce7ad1ebea46e75aa2828" Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.443440 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"34a82aeeb646dda1292f4b35e74c6ddf56bc97f4636ce7ad1ebea46e75aa2828"} err="failed to get container status \"34a82aeeb646dda1292f4b35e74c6ddf56bc97f4636ce7ad1ebea46e75aa2828\": rpc error: code = NotFound desc = could not find container \"34a82aeeb646dda1292f4b35e74c6ddf56bc97f4636ce7ad1ebea46e75aa2828\": container with ID starting with 34a82aeeb646dda1292f4b35e74c6ddf56bc97f4636ce7ad1ebea46e75aa2828 not found: ID does not exist" Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.443467 4791 scope.go:117] "RemoveContainer" containerID="7e3e523ed11dc8c54b2a957393eba3c4b2bed1e10a28229772ae43db02ebea5e" Feb 18 01:01:44 crc kubenswrapper[4791]: E0218 01:01:44.443782 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7e3e523ed11dc8c54b2a957393eba3c4b2bed1e10a28229772ae43db02ebea5e\": container with ID starting with 7e3e523ed11dc8c54b2a957393eba3c4b2bed1e10a28229772ae43db02ebea5e not found: ID does not exist" containerID="7e3e523ed11dc8c54b2a957393eba3c4b2bed1e10a28229772ae43db02ebea5e" Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.443826 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e3e523ed11dc8c54b2a957393eba3c4b2bed1e10a28229772ae43db02ebea5e"} err="failed to get container status 
\"7e3e523ed11dc8c54b2a957393eba3c4b2bed1e10a28229772ae43db02ebea5e\": rpc error: code = NotFound desc = could not find container \"7e3e523ed11dc8c54b2a957393eba3c4b2bed1e10a28229772ae43db02ebea5e\": container with ID starting with 7e3e523ed11dc8c54b2a957393eba3c4b2bed1e10a28229772ae43db02ebea5e not found: ID does not exist" Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.443853 4791 scope.go:117] "RemoveContainer" containerID="879e7ab83fde88328d4d27ced33732acf56efa4db37d8968c680597c63fb9475" Feb 18 01:01:44 crc kubenswrapper[4791]: E0218 01:01:44.444134 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"879e7ab83fde88328d4d27ced33732acf56efa4db37d8968c680597c63fb9475\": container with ID starting with 879e7ab83fde88328d4d27ced33732acf56efa4db37d8968c680597c63fb9475 not found: ID does not exist" containerID="879e7ab83fde88328d4d27ced33732acf56efa4db37d8968c680597c63fb9475" Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.444192 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"879e7ab83fde88328d4d27ced33732acf56efa4db37d8968c680597c63fb9475"} err="failed to get container status \"879e7ab83fde88328d4d27ced33732acf56efa4db37d8968c680597c63fb9475\": rpc error: code = NotFound desc = could not find container \"879e7ab83fde88328d4d27ced33732acf56efa4db37d8968c680597c63fb9475\": container with ID starting with 879e7ab83fde88328d4d27ced33732acf56efa4db37d8968c680597c63fb9475 not found: ID does not exist" Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.602101 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-4x5dt"] Feb 18 01:01:44 crc kubenswrapper[4791]: E0218 01:01:44.602936 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="151ecafd-43d4-496a-9262-c68d93bed5de" containerName="dnsmasq-dns" Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.602952 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="151ecafd-43d4-496a-9262-c68d93bed5de" containerName="dnsmasq-dns" Feb 18 01:01:44 crc kubenswrapper[4791]: E0218 01:01:44.602975 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="348a7b7d-959f-4e3e-b40f-d39facc48df0" containerName="extract-content" Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.602982 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="348a7b7d-959f-4e3e-b40f-d39facc48df0" containerName="extract-content" Feb 18 01:01:44 crc kubenswrapper[4791]: E0218 01:01:44.602998 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="151ecafd-43d4-496a-9262-c68d93bed5de" containerName="init" Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.603004 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="151ecafd-43d4-496a-9262-c68d93bed5de" containerName="init" Feb 18 01:01:44 crc kubenswrapper[4791]: E0218 01:01:44.603022 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="348a7b7d-959f-4e3e-b40f-d39facc48df0" containerName="registry-server" Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.603028 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="348a7b7d-959f-4e3e-b40f-d39facc48df0" containerName="registry-server" Feb 18 01:01:44 crc kubenswrapper[4791]: E0218 01:01:44.603055 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="348a7b7d-959f-4e3e-b40f-d39facc48df0" containerName="extract-utilities" Feb 18 01:01:44 crc kubenswrapper[4791]: 
I0218 01:01:44.603061 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="348a7b7d-959f-4e3e-b40f-d39facc48df0" containerName="extract-utilities" Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.603450 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="348a7b7d-959f-4e3e-b40f-d39facc48df0" containerName="registry-server" Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.603474 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="151ecafd-43d4-496a-9262-c68d93bed5de" containerName="dnsmasq-dns" Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.606480 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-4x5dt" Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.610211 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-qjdk4" Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.610236 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.610414 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.613626 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.620266 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-4x5dt"] Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.700094 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/25084610-eb17-4a22-bb76-3b67b38e4402-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-4x5dt\" (UID: \"25084610-eb17-4a22-bb76-3b67b38e4402\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-4x5dt" Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.700141 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/25084610-eb17-4a22-bb76-3b67b38e4402-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-4x5dt\" (UID: \"25084610-eb17-4a22-bb76-3b67b38e4402\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-4x5dt" Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.700663 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25084610-eb17-4a22-bb76-3b67b38e4402-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-4x5dt\" (UID: \"25084610-eb17-4a22-bb76-3b67b38e4402\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-4x5dt" Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.700781 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5g46c\" (UniqueName: \"kubernetes.io/projected/25084610-eb17-4a22-bb76-3b67b38e4402-kube-api-access-5g46c\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-4x5dt\" (UID: \"25084610-eb17-4a22-bb76-3b67b38e4402\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-4x5dt" Feb 18 
01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.803804 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25084610-eb17-4a22-bb76-3b67b38e4402-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-4x5dt\" (UID: \"25084610-eb17-4a22-bb76-3b67b38e4402\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-4x5dt" Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.803880 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5g46c\" (UniqueName: \"kubernetes.io/projected/25084610-eb17-4a22-bb76-3b67b38e4402-kube-api-access-5g46c\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-4x5dt\" (UID: \"25084610-eb17-4a22-bb76-3b67b38e4402\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-4x5dt" Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.804006 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/25084610-eb17-4a22-bb76-3b67b38e4402-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-4x5dt\" (UID: \"25084610-eb17-4a22-bb76-3b67b38e4402\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-4x5dt" Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.804042 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/25084610-eb17-4a22-bb76-3b67b38e4402-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-4x5dt\" (UID: \"25084610-eb17-4a22-bb76-3b67b38e4402\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-4x5dt" Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.809361 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/25084610-eb17-4a22-bb76-3b67b38e4402-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-4x5dt\" (UID: \"25084610-eb17-4a22-bb76-3b67b38e4402\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-4x5dt" Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.809392 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/25084610-eb17-4a22-bb76-3b67b38e4402-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-4x5dt\" (UID: \"25084610-eb17-4a22-bb76-3b67b38e4402\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-4x5dt" Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.809524 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25084610-eb17-4a22-bb76-3b67b38e4402-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-4x5dt\" (UID: \"25084610-eb17-4a22-bb76-3b67b38e4402\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-4x5dt" Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.821906 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5g46c\" (UniqueName: \"kubernetes.io/projected/25084610-eb17-4a22-bb76-3b67b38e4402-kube-api-access-5g46c\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-4x5dt\" (UID: \"25084610-eb17-4a22-bb76-3b67b38e4402\") " 
pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-4x5dt" Feb 18 01:01:44 crc kubenswrapper[4791]: I0218 01:01:44.931661 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-4x5dt" Feb 18 01:01:45 crc kubenswrapper[4791]: I0218 01:01:45.079539 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="348a7b7d-959f-4e3e-b40f-d39facc48df0" path="/var/lib/kubelet/pods/348a7b7d-959f-4e3e-b40f-d39facc48df0/volumes" Feb 18 01:01:45 crc kubenswrapper[4791]: I0218 01:01:45.179456 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"96310c28-c67e-463a-ab1e-beb273a7434e","Type":"ContainerStarted","Data":"b29535d0cda1de1ecc236c19f401255b26bb73a47b55a34b5398d5661110798e"} Feb 18 01:01:45 crc kubenswrapper[4791]: I0218 01:01:45.182900 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:45 crc kubenswrapper[4791]: I0218 01:01:45.211474 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=37.211449123 podStartE2EDuration="37.211449123s" podCreationTimestamp="2026-02-18 01:01:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 01:01:45.208121851 +0000 UTC m=+1646.776135061" watchObservedRunningTime="2026-02-18 01:01:45.211449123 +0000 UTC m=+1646.779462313" Feb 18 01:01:45 crc kubenswrapper[4791]: I0218 01:01:45.744480 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-4x5dt"] Feb 18 01:01:45 crc kubenswrapper[4791]: W0218 01:01:45.754409 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod25084610_eb17_4a22_bb76_3b67b38e4402.slice/crio-ab9b55ca653bdf594f88004aba74573cbf3462f048831c20c5ac69954148b45f WatchSource:0}: Error finding container ab9b55ca653bdf594f88004aba74573cbf3462f048831c20c5ac69954148b45f: Status 404 returned error can't find the container with id ab9b55ca653bdf594f88004aba74573cbf3462f048831c20c5ac69954148b45f Feb 18 01:01:46 crc kubenswrapper[4791]: E0218 01:01:46.062280 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:01:46 crc kubenswrapper[4791]: I0218 01:01:46.196943 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-4x5dt" event={"ID":"25084610-eb17-4a22-bb76-3b67b38e4402","Type":"ContainerStarted","Data":"ab9b55ca653bdf594f88004aba74573cbf3462f048831c20c5ac69954148b45f"} Feb 18 01:01:47 crc kubenswrapper[4791]: I0218 01:01:47.341262 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-2" Feb 18 01:01:53 crc kubenswrapper[4791]: E0218 01:01:53.085607 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" 
pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:01:53 crc kubenswrapper[4791]: I0218 01:01:53.100251 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Feb 18 01:01:53 crc kubenswrapper[4791]: E0218 01:01:53.275542 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:01:54 crc kubenswrapper[4791]: I0218 01:01:54.758815 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-8c989"] Feb 18 01:01:54 crc kubenswrapper[4791]: I0218 01:01:54.761591 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8c989" Feb 18 01:01:54 crc kubenswrapper[4791]: I0218 01:01:54.777030 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8c989"] Feb 18 01:01:54 crc kubenswrapper[4791]: I0218 01:01:54.815966 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qzchs\" (UniqueName: \"kubernetes.io/projected/1bb93696-a43f-46ef-a640-cb34d7607c6f-kube-api-access-qzchs\") pod \"community-operators-8c989\" (UID: \"1bb93696-a43f-46ef-a640-cb34d7607c6f\") " pod="openshift-marketplace/community-operators-8c989" Feb 18 01:01:54 crc kubenswrapper[4791]: I0218 01:01:54.816117 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1bb93696-a43f-46ef-a640-cb34d7607c6f-catalog-content\") pod \"community-operators-8c989\" (UID: \"1bb93696-a43f-46ef-a640-cb34d7607c6f\") " pod="openshift-marketplace/community-operators-8c989" Feb 18 01:01:54 crc kubenswrapper[4791]: I0218 01:01:54.816144 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1bb93696-a43f-46ef-a640-cb34d7607c6f-utilities\") pod \"community-operators-8c989\" (UID: \"1bb93696-a43f-46ef-a640-cb34d7607c6f\") " pod="openshift-marketplace/community-operators-8c989" Feb 18 01:01:54 crc kubenswrapper[4791]: I0218 01:01:54.917592 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1bb93696-a43f-46ef-a640-cb34d7607c6f-catalog-content\") pod \"community-operators-8c989\" (UID: \"1bb93696-a43f-46ef-a640-cb34d7607c6f\") " pod="openshift-marketplace/community-operators-8c989" Feb 18 01:01:54 crc kubenswrapper[4791]: I0218 01:01:54.917636 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1bb93696-a43f-46ef-a640-cb34d7607c6f-utilities\") pod \"community-operators-8c989\" (UID: \"1bb93696-a43f-46ef-a640-cb34d7607c6f\") " pod="openshift-marketplace/community-operators-8c989" Feb 18 01:01:54 crc kubenswrapper[4791]: I0218 01:01:54.917788 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qzchs\" (UniqueName: \"kubernetes.io/projected/1bb93696-a43f-46ef-a640-cb34d7607c6f-kube-api-access-qzchs\") pod \"community-operators-8c989\" (UID: 
\"1bb93696-a43f-46ef-a640-cb34d7607c6f\") " pod="openshift-marketplace/community-operators-8c989" Feb 18 01:01:54 crc kubenswrapper[4791]: I0218 01:01:54.918440 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1bb93696-a43f-46ef-a640-cb34d7607c6f-catalog-content\") pod \"community-operators-8c989\" (UID: \"1bb93696-a43f-46ef-a640-cb34d7607c6f\") " pod="openshift-marketplace/community-operators-8c989" Feb 18 01:01:54 crc kubenswrapper[4791]: I0218 01:01:54.918468 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1bb93696-a43f-46ef-a640-cb34d7607c6f-utilities\") pod \"community-operators-8c989\" (UID: \"1bb93696-a43f-46ef-a640-cb34d7607c6f\") " pod="openshift-marketplace/community-operators-8c989" Feb 18 01:01:54 crc kubenswrapper[4791]: I0218 01:01:54.948952 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qzchs\" (UniqueName: \"kubernetes.io/projected/1bb93696-a43f-46ef-a640-cb34d7607c6f-kube-api-access-qzchs\") pod \"community-operators-8c989\" (UID: \"1bb93696-a43f-46ef-a640-cb34d7607c6f\") " pod="openshift-marketplace/community-operators-8c989" Feb 18 01:01:55 crc kubenswrapper[4791]: I0218 01:01:55.091930 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8c989" Feb 18 01:01:56 crc kubenswrapper[4791]: I0218 01:01:56.800240 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 01:01:56 crc kubenswrapper[4791]: I0218 01:01:56.800770 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 01:01:56 crc kubenswrapper[4791]: W0218 01:01:56.855459 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1bb93696_a43f_46ef_a640_cb34d7607c6f.slice/crio-5297f81f557290c7012b177d32cde7642984239c6ef7e0b1e6e6ccf906221fcc WatchSource:0}: Error finding container 5297f81f557290c7012b177d32cde7642984239c6ef7e0b1e6e6ccf906221fcc: Status 404 returned error can't find the container with id 5297f81f557290c7012b177d32cde7642984239c6ef7e0b1e6e6ccf906221fcc Feb 18 01:01:56 crc kubenswrapper[4791]: I0218 01:01:56.867691 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8c989"] Feb 18 01:01:57 crc kubenswrapper[4791]: E0218 01:01:57.070943 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:01:57 crc kubenswrapper[4791]: I0218 01:01:57.324574 4791 generic.go:334] "Generic (PLEG): container finished" podID="1bb93696-a43f-46ef-a640-cb34d7607c6f" 
containerID="2bb8a8e68a4b2e6cda1a81b35b878864b32424589b94616ba82ed532a6445883" exitCode=0 Feb 18 01:01:57 crc kubenswrapper[4791]: I0218 01:01:57.324663 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8c989" event={"ID":"1bb93696-a43f-46ef-a640-cb34d7607c6f","Type":"ContainerDied","Data":"2bb8a8e68a4b2e6cda1a81b35b878864b32424589b94616ba82ed532a6445883"} Feb 18 01:01:57 crc kubenswrapper[4791]: I0218 01:01:57.324697 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8c989" event={"ID":"1bb93696-a43f-46ef-a640-cb34d7607c6f","Type":"ContainerStarted","Data":"5297f81f557290c7012b177d32cde7642984239c6ef7e0b1e6e6ccf906221fcc"} Feb 18 01:01:57 crc kubenswrapper[4791]: I0218 01:01:57.327649 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-4x5dt" event={"ID":"25084610-eb17-4a22-bb76-3b67b38e4402","Type":"ContainerStarted","Data":"4913b2c678419d477ee490915969ca69490f1b07b0e3080e1c6fd29d012b3cd6"} Feb 18 01:01:57 crc kubenswrapper[4791]: I0218 01:01:57.343627 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-2" Feb 18 01:01:57 crc kubenswrapper[4791]: I0218 01:01:57.418707 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-4x5dt" podStartSLOduration=2.686932947 podStartE2EDuration="13.418679792s" podCreationTimestamp="2026-02-18 01:01:44 +0000 UTC" firstStartedPulling="2026-02-18 01:01:45.758263285 +0000 UTC m=+1647.326276455" lastFinishedPulling="2026-02-18 01:01:56.49001013 +0000 UTC m=+1658.058023300" observedRunningTime="2026-02-18 01:01:57.395144704 +0000 UTC m=+1658.963157884" watchObservedRunningTime="2026-02-18 01:01:57.418679792 +0000 UTC m=+1658.986692962" Feb 18 01:01:57 crc kubenswrapper[4791]: I0218 01:01:57.436441 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-1"] Feb 18 01:01:59 crc kubenswrapper[4791]: I0218 01:01:59.034315 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Feb 18 01:01:59 crc kubenswrapper[4791]: I0218 01:01:59.359045 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8c989" event={"ID":"1bb93696-a43f-46ef-a640-cb34d7607c6f","Type":"ContainerStarted","Data":"bd2e3f0647b4b0a5a4e8171588417169e3096c8da43b268f9b6fbbbf0d8f83c5"} Feb 18 01:02:00 crc kubenswrapper[4791]: I0218 01:02:00.370864 4791 generic.go:334] "Generic (PLEG): container finished" podID="1bb93696-a43f-46ef-a640-cb34d7607c6f" containerID="bd2e3f0647b4b0a5a4e8171588417169e3096c8da43b268f9b6fbbbf0d8f83c5" exitCode=0 Feb 18 01:02:00 crc kubenswrapper[4791]: I0218 01:02:00.370910 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8c989" event={"ID":"1bb93696-a43f-46ef-a640-cb34d7607c6f","Type":"ContainerDied","Data":"bd2e3f0647b4b0a5a4e8171588417169e3096c8da43b268f9b6fbbbf0d8f83c5"} Feb 18 01:02:01 crc kubenswrapper[4791]: I0218 01:02:01.383226 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8c989" event={"ID":"1bb93696-a43f-46ef-a640-cb34d7607c6f","Type":"ContainerStarted","Data":"06d82fed01aa9c4823be63cb544ed0ccd15dd6e55eadb5f17a2222847950a30e"} Feb 18 01:02:01 crc kubenswrapper[4791]: I0218 01:02:01.409374 4791 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openshift-marketplace/community-operators-8c989" podStartSLOduration=3.986894818 podStartE2EDuration="7.409355601s" podCreationTimestamp="2026-02-18 01:01:54 +0000 UTC" firstStartedPulling="2026-02-18 01:01:57.326362297 +0000 UTC m=+1658.894375477" lastFinishedPulling="2026-02-18 01:02:00.74882309 +0000 UTC m=+1662.316836260" observedRunningTime="2026-02-18 01:02:01.399988381 +0000 UTC m=+1662.968001561" watchObservedRunningTime="2026-02-18 01:02:01.409355601 +0000 UTC m=+1662.977368771" Feb 18 01:02:01 crc kubenswrapper[4791]: I0218 01:02:01.766079 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-1" podUID="519e8bd0-f30e-4ff2-be43-b33764a95351" containerName="rabbitmq" containerID="cri-o://90f76616b352a9e3808f336ce236d176ba8f78c053809552f9a2bd12a6eac551" gracePeriod=604796 Feb 18 01:02:05 crc kubenswrapper[4791]: I0218 01:02:05.093403 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-8c989" Feb 18 01:02:05 crc kubenswrapper[4791]: I0218 01:02:05.093975 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-8c989" Feb 18 01:02:05 crc kubenswrapper[4791]: I0218 01:02:05.105227 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-1" podUID="519e8bd0-f30e-4ff2-be43-b33764a95351" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.131:5671: connect: connection refused" Feb 18 01:02:05 crc kubenswrapper[4791]: I0218 01:02:05.148291 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-8c989" Feb 18 01:02:06 crc kubenswrapper[4791]: I0218 01:02:06.840818 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-sshfr"] Feb 18 01:02:06 crc kubenswrapper[4791]: I0218 01:02:06.843653 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-sshfr" Feb 18 01:02:06 crc kubenswrapper[4791]: I0218 01:02:06.856776 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sshfr"] Feb 18 01:02:06 crc kubenswrapper[4791]: I0218 01:02:06.952454 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc102119-c06f-434e-af4d-c44b920538e5-catalog-content\") pod \"certified-operators-sshfr\" (UID: \"dc102119-c06f-434e-af4d-c44b920538e5\") " pod="openshift-marketplace/certified-operators-sshfr" Feb 18 01:02:06 crc kubenswrapper[4791]: I0218 01:02:06.952539 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc102119-c06f-434e-af4d-c44b920538e5-utilities\") pod \"certified-operators-sshfr\" (UID: \"dc102119-c06f-434e-af4d-c44b920538e5\") " pod="openshift-marketplace/certified-operators-sshfr" Feb 18 01:02:06 crc kubenswrapper[4791]: I0218 01:02:06.952611 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kxwpm\" (UniqueName: \"kubernetes.io/projected/dc102119-c06f-434e-af4d-c44b920538e5-kube-api-access-kxwpm\") pod \"certified-operators-sshfr\" (UID: \"dc102119-c06f-434e-af4d-c44b920538e5\") " pod="openshift-marketplace/certified-operators-sshfr" Feb 18 01:02:07 crc kubenswrapper[4791]: I0218 01:02:07.054501 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc102119-c06f-434e-af4d-c44b920538e5-catalog-content\") pod \"certified-operators-sshfr\" (UID: \"dc102119-c06f-434e-af4d-c44b920538e5\") " pod="openshift-marketplace/certified-operators-sshfr" Feb 18 01:02:07 crc kubenswrapper[4791]: I0218 01:02:07.054572 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc102119-c06f-434e-af4d-c44b920538e5-utilities\") pod \"certified-operators-sshfr\" (UID: \"dc102119-c06f-434e-af4d-c44b920538e5\") " pod="openshift-marketplace/certified-operators-sshfr" Feb 18 01:02:07 crc kubenswrapper[4791]: I0218 01:02:07.054636 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kxwpm\" (UniqueName: \"kubernetes.io/projected/dc102119-c06f-434e-af4d-c44b920538e5-kube-api-access-kxwpm\") pod \"certified-operators-sshfr\" (UID: \"dc102119-c06f-434e-af4d-c44b920538e5\") " pod="openshift-marketplace/certified-operators-sshfr" Feb 18 01:02:07 crc kubenswrapper[4791]: I0218 01:02:07.055525 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc102119-c06f-434e-af4d-c44b920538e5-catalog-content\") pod \"certified-operators-sshfr\" (UID: \"dc102119-c06f-434e-af4d-c44b920538e5\") " pod="openshift-marketplace/certified-operators-sshfr" Feb 18 01:02:07 crc kubenswrapper[4791]: I0218 01:02:07.055739 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc102119-c06f-434e-af4d-c44b920538e5-utilities\") pod \"certified-operators-sshfr\" (UID: \"dc102119-c06f-434e-af4d-c44b920538e5\") " pod="openshift-marketplace/certified-operators-sshfr" Feb 18 01:02:07 crc kubenswrapper[4791]: I0218 01:02:07.074743 4791 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-kxwpm\" (UniqueName: \"kubernetes.io/projected/dc102119-c06f-434e-af4d-c44b920538e5-kube-api-access-kxwpm\") pod \"certified-operators-sshfr\" (UID: \"dc102119-c06f-434e-af4d-c44b920538e5\") " pod="openshift-marketplace/certified-operators-sshfr" Feb 18 01:02:07 crc kubenswrapper[4791]: I0218 01:02:07.165662 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sshfr" Feb 18 01:02:07 crc kubenswrapper[4791]: E0218 01:02:07.178108 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 01:02:07 crc kubenswrapper[4791]: E0218 01:02:07.178182 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 01:02:07 crc kubenswrapper[4791]: E0218 01:02:07.178343 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndh699h566hcch79h59h595h58bhb6h7fh594h67fh546h668h56hc8h6chd5h94h599h587hddh64bh57bhc9h68fh7dh57bh65dh64fh68dh59dq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-np7gr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(4b9cec47-aeda-40f0-b83e-46f09ce65e95): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 18 01:02:07 crc kubenswrapper[4791]: E0218 01:02:07.179543 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:02:07 crc kubenswrapper[4791]: I0218 01:02:07.625173 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sshfr"] Feb 18 01:02:07 crc kubenswrapper[4791]: W0218 01:02:07.627902 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddc102119_c06f_434e_af4d_c44b920538e5.slice/crio-99613f2af7af76f76facd8fac853b54cfd998e5d807a4a059be0cdd784514409 WatchSource:0}: Error finding container 99613f2af7af76f76facd8fac853b54cfd998e5d807a4a059be0cdd784514409: Status 404 returned error can't find the container with id 99613f2af7af76f76facd8fac853b54cfd998e5d807a4a059be0cdd784514409 Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.415963 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-1" Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.484996 4791 generic.go:334] "Generic (PLEG): container finished" podID="519e8bd0-f30e-4ff2-be43-b33764a95351" containerID="90f76616b352a9e3808f336ce236d176ba8f78c053809552f9a2bd12a6eac551" exitCode=0 Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.485057 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"519e8bd0-f30e-4ff2-be43-b33764a95351","Type":"ContainerDied","Data":"90f76616b352a9e3808f336ce236d176ba8f78c053809552f9a2bd12a6eac551"} Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.485136 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-1" Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.485452 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"519e8bd0-f30e-4ff2-be43-b33764a95351","Type":"ContainerDied","Data":"4f7b6b8544d4841de64a2043c9b8a430baa650e2e20ac3b845e6afeff20c1c22"} Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.485643 4791 scope.go:117] "RemoveContainer" containerID="90f76616b352a9e3808f336ce236d176ba8f78c053809552f9a2bd12a6eac551" Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.487103 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-250fdf75-dad4-4c83-ac41-ea858e526e7c\") pod \"519e8bd0-f30e-4ff2-be43-b33764a95351\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.487262 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/519e8bd0-f30e-4ff2-be43-b33764a95351-erlang-cookie-secret\") pod \"519e8bd0-f30e-4ff2-be43-b33764a95351\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.487310 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sd7qb\" (UniqueName: \"kubernetes.io/projected/519e8bd0-f30e-4ff2-be43-b33764a95351-kube-api-access-sd7qb\") pod \"519e8bd0-f30e-4ff2-be43-b33764a95351\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.487363 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/519e8bd0-f30e-4ff2-be43-b33764a95351-config-data\") pod \"519e8bd0-f30e-4ff2-be43-b33764a95351\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.487402 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/519e8bd0-f30e-4ff2-be43-b33764a95351-plugins-conf\") pod \"519e8bd0-f30e-4ff2-be43-b33764a95351\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.487426 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/519e8bd0-f30e-4ff2-be43-b33764a95351-server-conf\") pod \"519e8bd0-f30e-4ff2-be43-b33764a95351\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.487485 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/519e8bd0-f30e-4ff2-be43-b33764a95351-rabbitmq-plugins\") pod \"519e8bd0-f30e-4ff2-be43-b33764a95351\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.487552 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/519e8bd0-f30e-4ff2-be43-b33764a95351-rabbitmq-tls\") pod \"519e8bd0-f30e-4ff2-be43-b33764a95351\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.487632 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/519e8bd0-f30e-4ff2-be43-b33764a95351-rabbitmq-erlang-cookie\") pod \"519e8bd0-f30e-4ff2-be43-b33764a95351\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.487652 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/519e8bd0-f30e-4ff2-be43-b33764a95351-rabbitmq-confd\") pod \"519e8bd0-f30e-4ff2-be43-b33764a95351\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.487703 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/519e8bd0-f30e-4ff2-be43-b33764a95351-pod-info\") pod \"519e8bd0-f30e-4ff2-be43-b33764a95351\" (UID: \"519e8bd0-f30e-4ff2-be43-b33764a95351\") " Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.493759 4791 generic.go:334] "Generic (PLEG): container finished" podID="dc102119-c06f-434e-af4d-c44b920538e5" containerID="6f2775b059474db51df55e226d7c0045bb2434c4c732928e6f00c63afb4afdd6" exitCode=0 Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.494289 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sshfr" event={"ID":"dc102119-c06f-434e-af4d-c44b920538e5","Type":"ContainerDied","Data":"6f2775b059474db51df55e226d7c0045bb2434c4c732928e6f00c63afb4afdd6"} Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.494445 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sshfr" event={"ID":"dc102119-c06f-434e-af4d-c44b920538e5","Type":"ContainerStarted","Data":"99613f2af7af76f76facd8fac853b54cfd998e5d807a4a059be0cdd784514409"} Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.496032 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/519e8bd0-f30e-4ff2-be43-b33764a95351-pod-info" (OuterVolumeSpecName: "pod-info") pod "519e8bd0-f30e-4ff2-be43-b33764a95351" (UID: "519e8bd0-f30e-4ff2-be43-b33764a95351"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.498791 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/519e8bd0-f30e-4ff2-be43-b33764a95351-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "519e8bd0-f30e-4ff2-be43-b33764a95351" (UID: "519e8bd0-f30e-4ff2-be43-b33764a95351"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.499789 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/519e8bd0-f30e-4ff2-be43-b33764a95351-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "519e8bd0-f30e-4ff2-be43-b33764a95351" (UID: "519e8bd0-f30e-4ff2-be43-b33764a95351"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.500331 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/519e8bd0-f30e-4ff2-be43-b33764a95351-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "519e8bd0-f30e-4ff2-be43-b33764a95351" (UID: "519e8bd0-f30e-4ff2-be43-b33764a95351"). InnerVolumeSpecName "rabbitmq-plugins". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.502557 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/519e8bd0-f30e-4ff2-be43-b33764a95351-kube-api-access-sd7qb" (OuterVolumeSpecName: "kube-api-access-sd7qb") pod "519e8bd0-f30e-4ff2-be43-b33764a95351" (UID: "519e8bd0-f30e-4ff2-be43-b33764a95351"). InnerVolumeSpecName "kube-api-access-sd7qb". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.529501 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/519e8bd0-f30e-4ff2-be43-b33764a95351-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "519e8bd0-f30e-4ff2-be43-b33764a95351" (UID: "519e8bd0-f30e-4ff2-be43-b33764a95351"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.529889 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/519e8bd0-f30e-4ff2-be43-b33764a95351-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "519e8bd0-f30e-4ff2-be43-b33764a95351" (UID: "519e8bd0-f30e-4ff2-be43-b33764a95351"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.547431 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-250fdf75-dad4-4c83-ac41-ea858e526e7c" (OuterVolumeSpecName: "persistence") pod "519e8bd0-f30e-4ff2-be43-b33764a95351" (UID: "519e8bd0-f30e-4ff2-be43-b33764a95351"). InnerVolumeSpecName "pvc-250fdf75-dad4-4c83-ac41-ea858e526e7c". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.593766 4791 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/519e8bd0-f30e-4ff2-be43-b33764a95351-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.593802 4791 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/519e8bd0-f30e-4ff2-be43-b33764a95351-pod-info\") on node \"crc\" DevicePath \"\"" Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.593825 4791 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-250fdf75-dad4-4c83-ac41-ea858e526e7c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-250fdf75-dad4-4c83-ac41-ea858e526e7c\") on node \"crc\" " Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.593835 4791 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/519e8bd0-f30e-4ff2-be43-b33764a95351-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.593845 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sd7qb\" (UniqueName: \"kubernetes.io/projected/519e8bd0-f30e-4ff2-be43-b33764a95351-kube-api-access-sd7qb\") on node \"crc\" DevicePath \"\"" Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.593854 4791 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/519e8bd0-f30e-4ff2-be43-b33764a95351-plugins-conf\") on node \"crc\" DevicePath \"\"" Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.593861 4791 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/519e8bd0-f30e-4ff2-be43-b33764a95351-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.593869 4791 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/519e8bd0-f30e-4ff2-be43-b33764a95351-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.617277 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/519e8bd0-f30e-4ff2-be43-b33764a95351-config-data" (OuterVolumeSpecName: "config-data") pod "519e8bd0-f30e-4ff2-be43-b33764a95351" (UID: "519e8bd0-f30e-4ff2-be43-b33764a95351"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.646095 4791 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.646282 4791 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-250fdf75-dad4-4c83-ac41-ea858e526e7c" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-250fdf75-dad4-4c83-ac41-ea858e526e7c") on node "crc" Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.663767 4791 scope.go:117] "RemoveContainer" containerID="ba43b1dd0a2b84c56947bbfb98f6d09a648684bfbeacf33f245015081ac2734e" Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.673524 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/519e8bd0-f30e-4ff2-be43-b33764a95351-server-conf" (OuterVolumeSpecName: "server-conf") pod "519e8bd0-f30e-4ff2-be43-b33764a95351" (UID: "519e8bd0-f30e-4ff2-be43-b33764a95351"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.698222 4791 reconciler_common.go:293] "Volume detached for volume \"pvc-250fdf75-dad4-4c83-ac41-ea858e526e7c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-250fdf75-dad4-4c83-ac41-ea858e526e7c\") on node \"crc\" DevicePath \"\"" Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.698258 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/519e8bd0-f30e-4ff2-be43-b33764a95351-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.698270 4791 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/519e8bd0-f30e-4ff2-be43-b33764a95351-server-conf\") on node \"crc\" DevicePath \"\"" Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.721129 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/519e8bd0-f30e-4ff2-be43-b33764a95351-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "519e8bd0-f30e-4ff2-be43-b33764a95351" (UID: "519e8bd0-f30e-4ff2-be43-b33764a95351"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.730724 4791 scope.go:117] "RemoveContainer" containerID="90f76616b352a9e3808f336ce236d176ba8f78c053809552f9a2bd12a6eac551" Feb 18 01:02:08 crc kubenswrapper[4791]: E0218 01:02:08.731314 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"90f76616b352a9e3808f336ce236d176ba8f78c053809552f9a2bd12a6eac551\": container with ID starting with 90f76616b352a9e3808f336ce236d176ba8f78c053809552f9a2bd12a6eac551 not found: ID does not exist" containerID="90f76616b352a9e3808f336ce236d176ba8f78c053809552f9a2bd12a6eac551" Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.731352 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"90f76616b352a9e3808f336ce236d176ba8f78c053809552f9a2bd12a6eac551"} err="failed to get container status \"90f76616b352a9e3808f336ce236d176ba8f78c053809552f9a2bd12a6eac551\": rpc error: code = NotFound desc = could not find container \"90f76616b352a9e3808f336ce236d176ba8f78c053809552f9a2bd12a6eac551\": container with ID starting with 90f76616b352a9e3808f336ce236d176ba8f78c053809552f9a2bd12a6eac551 not found: ID does not exist" Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.731379 4791 scope.go:117] "RemoveContainer" containerID="ba43b1dd0a2b84c56947bbfb98f6d09a648684bfbeacf33f245015081ac2734e" Feb 18 01:02:08 crc kubenswrapper[4791]: E0218 01:02:08.731734 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba43b1dd0a2b84c56947bbfb98f6d09a648684bfbeacf33f245015081ac2734e\": container with ID starting with ba43b1dd0a2b84c56947bbfb98f6d09a648684bfbeacf33f245015081ac2734e not found: ID does not exist" containerID="ba43b1dd0a2b84c56947bbfb98f6d09a648684bfbeacf33f245015081ac2734e" Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.731762 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba43b1dd0a2b84c56947bbfb98f6d09a648684bfbeacf33f245015081ac2734e"} err="failed to get container status \"ba43b1dd0a2b84c56947bbfb98f6d09a648684bfbeacf33f245015081ac2734e\": rpc error: code = NotFound desc = could not find container \"ba43b1dd0a2b84c56947bbfb98f6d09a648684bfbeacf33f245015081ac2734e\": container with ID starting with ba43b1dd0a2b84c56947bbfb98f6d09a648684bfbeacf33f245015081ac2734e not found: ID does not exist" Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.800381 4791 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/519e8bd0-f30e-4ff2-be43-b33764a95351-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.823062 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-1"] Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.833953 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-1"] Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.857342 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-1"] Feb 18 01:02:08 crc kubenswrapper[4791]: E0218 01:02:08.857992 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="519e8bd0-f30e-4ff2-be43-b33764a95351" containerName="rabbitmq" Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.858061 4791 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="519e8bd0-f30e-4ff2-be43-b33764a95351" containerName="rabbitmq" Feb 18 01:02:08 crc kubenswrapper[4791]: E0218 01:02:08.858090 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="519e8bd0-f30e-4ff2-be43-b33764a95351" containerName="setup-container" Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.858098 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="519e8bd0-f30e-4ff2-be43-b33764a95351" containerName="setup-container" Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.858489 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="519e8bd0-f30e-4ff2-be43-b33764a95351" containerName="rabbitmq" Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.860546 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-1" Feb 18 01:02:08 crc kubenswrapper[4791]: I0218 01:02:08.872655 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-1"] Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.005635 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e685bb75-4a24-4b83-8385-e6f5bdc9a526-rabbitmq-tls\") pod \"rabbitmq-server-1\" (UID: \"e685bb75-4a24-4b83-8385-e6f5bdc9a526\") " pod="openstack/rabbitmq-server-1" Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.005677 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e685bb75-4a24-4b83-8385-e6f5bdc9a526-config-data\") pod \"rabbitmq-server-1\" (UID: \"e685bb75-4a24-4b83-8385-e6f5bdc9a526\") " pod="openstack/rabbitmq-server-1" Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.006289 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e685bb75-4a24-4b83-8385-e6f5bdc9a526-server-conf\") pod \"rabbitmq-server-1\" (UID: \"e685bb75-4a24-4b83-8385-e6f5bdc9a526\") " pod="openstack/rabbitmq-server-1" Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.006380 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e685bb75-4a24-4b83-8385-e6f5bdc9a526-pod-info\") pod \"rabbitmq-server-1\" (UID: \"e685bb75-4a24-4b83-8385-e6f5bdc9a526\") " pod="openstack/rabbitmq-server-1" Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.006449 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-250fdf75-dad4-4c83-ac41-ea858e526e7c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-250fdf75-dad4-4c83-ac41-ea858e526e7c\") pod \"rabbitmq-server-1\" (UID: \"e685bb75-4a24-4b83-8385-e6f5bdc9a526\") " pod="openstack/rabbitmq-server-1" Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.006539 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ck5bm\" (UniqueName: \"kubernetes.io/projected/e685bb75-4a24-4b83-8385-e6f5bdc9a526-kube-api-access-ck5bm\") pod \"rabbitmq-server-1\" (UID: \"e685bb75-4a24-4b83-8385-e6f5bdc9a526\") " pod="openstack/rabbitmq-server-1" Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.006640 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: 
\"kubernetes.io/secret/e685bb75-4a24-4b83-8385-e6f5bdc9a526-erlang-cookie-secret\") pod \"rabbitmq-server-1\" (UID: \"e685bb75-4a24-4b83-8385-e6f5bdc9a526\") " pod="openstack/rabbitmq-server-1" Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.007466 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e685bb75-4a24-4b83-8385-e6f5bdc9a526-plugins-conf\") pod \"rabbitmq-server-1\" (UID: \"e685bb75-4a24-4b83-8385-e6f5bdc9a526\") " pod="openstack/rabbitmq-server-1" Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.007748 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e685bb75-4a24-4b83-8385-e6f5bdc9a526-rabbitmq-confd\") pod \"rabbitmq-server-1\" (UID: \"e685bb75-4a24-4b83-8385-e6f5bdc9a526\") " pod="openstack/rabbitmq-server-1" Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.007858 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e685bb75-4a24-4b83-8385-e6f5bdc9a526-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-1\" (UID: \"e685bb75-4a24-4b83-8385-e6f5bdc9a526\") " pod="openstack/rabbitmq-server-1" Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.008042 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e685bb75-4a24-4b83-8385-e6f5bdc9a526-rabbitmq-plugins\") pod \"rabbitmq-server-1\" (UID: \"e685bb75-4a24-4b83-8385-e6f5bdc9a526\") " pod="openstack/rabbitmq-server-1" Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.080097 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="519e8bd0-f30e-4ff2-be43-b33764a95351" path="/var/lib/kubelet/pods/519e8bd0-f30e-4ff2-be43-b33764a95351/volumes" Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.112167 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e685bb75-4a24-4b83-8385-e6f5bdc9a526-config-data\") pod \"rabbitmq-server-1\" (UID: \"e685bb75-4a24-4b83-8385-e6f5bdc9a526\") " pod="openstack/rabbitmq-server-1" Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.112362 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e685bb75-4a24-4b83-8385-e6f5bdc9a526-rabbitmq-tls\") pod \"rabbitmq-server-1\" (UID: \"e685bb75-4a24-4b83-8385-e6f5bdc9a526\") " pod="openstack/rabbitmq-server-1" Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.113172 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e685bb75-4a24-4b83-8385-e6f5bdc9a526-server-conf\") pod \"rabbitmq-server-1\" (UID: \"e685bb75-4a24-4b83-8385-e6f5bdc9a526\") " pod="openstack/rabbitmq-server-1" Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.113203 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e685bb75-4a24-4b83-8385-e6f5bdc9a526-pod-info\") pod \"rabbitmq-server-1\" (UID: \"e685bb75-4a24-4b83-8385-e6f5bdc9a526\") " pod="openstack/rabbitmq-server-1" Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.113232 4791 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"pvc-250fdf75-dad4-4c83-ac41-ea858e526e7c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-250fdf75-dad4-4c83-ac41-ea858e526e7c\") pod \"rabbitmq-server-1\" (UID: \"e685bb75-4a24-4b83-8385-e6f5bdc9a526\") " pod="openstack/rabbitmq-server-1" Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.113265 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ck5bm\" (UniqueName: \"kubernetes.io/projected/e685bb75-4a24-4b83-8385-e6f5bdc9a526-kube-api-access-ck5bm\") pod \"rabbitmq-server-1\" (UID: \"e685bb75-4a24-4b83-8385-e6f5bdc9a526\") " pod="openstack/rabbitmq-server-1" Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.113324 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e685bb75-4a24-4b83-8385-e6f5bdc9a526-erlang-cookie-secret\") pod \"rabbitmq-server-1\" (UID: \"e685bb75-4a24-4b83-8385-e6f5bdc9a526\") " pod="openstack/rabbitmq-server-1" Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.113357 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e685bb75-4a24-4b83-8385-e6f5bdc9a526-plugins-conf\") pod \"rabbitmq-server-1\" (UID: \"e685bb75-4a24-4b83-8385-e6f5bdc9a526\") " pod="openstack/rabbitmq-server-1" Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.113402 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e685bb75-4a24-4b83-8385-e6f5bdc9a526-config-data\") pod \"rabbitmq-server-1\" (UID: \"e685bb75-4a24-4b83-8385-e6f5bdc9a526\") " pod="openstack/rabbitmq-server-1" Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.113485 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e685bb75-4a24-4b83-8385-e6f5bdc9a526-rabbitmq-confd\") pod \"rabbitmq-server-1\" (UID: \"e685bb75-4a24-4b83-8385-e6f5bdc9a526\") " pod="openstack/rabbitmq-server-1" Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.113512 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e685bb75-4a24-4b83-8385-e6f5bdc9a526-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-1\" (UID: \"e685bb75-4a24-4b83-8385-e6f5bdc9a526\") " pod="openstack/rabbitmq-server-1" Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.113573 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e685bb75-4a24-4b83-8385-e6f5bdc9a526-rabbitmq-plugins\") pod \"rabbitmq-server-1\" (UID: \"e685bb75-4a24-4b83-8385-e6f5bdc9a526\") " pod="openstack/rabbitmq-server-1" Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.114369 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e685bb75-4a24-4b83-8385-e6f5bdc9a526-server-conf\") pod \"rabbitmq-server-1\" (UID: \"e685bb75-4a24-4b83-8385-e6f5bdc9a526\") " pod="openstack/rabbitmq-server-1" Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.114733 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e685bb75-4a24-4b83-8385-e6f5bdc9a526-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-1\" 
(UID: \"e685bb75-4a24-4b83-8385-e6f5bdc9a526\") " pod="openstack/rabbitmq-server-1" Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.114905 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e685bb75-4a24-4b83-8385-e6f5bdc9a526-plugins-conf\") pod \"rabbitmq-server-1\" (UID: \"e685bb75-4a24-4b83-8385-e6f5bdc9a526\") " pod="openstack/rabbitmq-server-1" Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.115052 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e685bb75-4a24-4b83-8385-e6f5bdc9a526-rabbitmq-plugins\") pod \"rabbitmq-server-1\" (UID: \"e685bb75-4a24-4b83-8385-e6f5bdc9a526\") " pod="openstack/rabbitmq-server-1" Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.120788 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e685bb75-4a24-4b83-8385-e6f5bdc9a526-pod-info\") pod \"rabbitmq-server-1\" (UID: \"e685bb75-4a24-4b83-8385-e6f5bdc9a526\") " pod="openstack/rabbitmq-server-1" Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.120790 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e685bb75-4a24-4b83-8385-e6f5bdc9a526-erlang-cookie-secret\") pod \"rabbitmq-server-1\" (UID: \"e685bb75-4a24-4b83-8385-e6f5bdc9a526\") " pod="openstack/rabbitmq-server-1" Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.120866 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e685bb75-4a24-4b83-8385-e6f5bdc9a526-rabbitmq-tls\") pod \"rabbitmq-server-1\" (UID: \"e685bb75-4a24-4b83-8385-e6f5bdc9a526\") " pod="openstack/rabbitmq-server-1" Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.121779 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e685bb75-4a24-4b83-8385-e6f5bdc9a526-rabbitmq-confd\") pod \"rabbitmq-server-1\" (UID: \"e685bb75-4a24-4b83-8385-e6f5bdc9a526\") " pod="openstack/rabbitmq-server-1" Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.130353 4791 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.130399 4791 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-250fdf75-dad4-4c83-ac41-ea858e526e7c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-250fdf75-dad4-4c83-ac41-ea858e526e7c\") pod \"rabbitmq-server-1\" (UID: \"e685bb75-4a24-4b83-8385-e6f5bdc9a526\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/8fe3627c0588b651edad1eb27c527cf6c3fb938e5bed6793f718baee94b2ccc0/globalmount\"" pod="openstack/rabbitmq-server-1" Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.130880 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ck5bm\" (UniqueName: \"kubernetes.io/projected/e685bb75-4a24-4b83-8385-e6f5bdc9a526-kube-api-access-ck5bm\") pod \"rabbitmq-server-1\" (UID: \"e685bb75-4a24-4b83-8385-e6f5bdc9a526\") " pod="openstack/rabbitmq-server-1" Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.196642 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-250fdf75-dad4-4c83-ac41-ea858e526e7c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-250fdf75-dad4-4c83-ac41-ea858e526e7c\") pod \"rabbitmq-server-1\" (UID: \"e685bb75-4a24-4b83-8385-e6f5bdc9a526\") " pod="openstack/rabbitmq-server-1" Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.202173 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-1" Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.514577 4791 generic.go:334] "Generic (PLEG): container finished" podID="25084610-eb17-4a22-bb76-3b67b38e4402" containerID="4913b2c678419d477ee490915969ca69490f1b07b0e3080e1c6fd29d012b3cd6" exitCode=0 Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.514980 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-4x5dt" event={"ID":"25084610-eb17-4a22-bb76-3b67b38e4402","Type":"ContainerDied","Data":"4913b2c678419d477ee490915969ca69490f1b07b0e3080e1c6fd29d012b3cd6"} Feb 18 01:02:09 crc kubenswrapper[4791]: W0218 01:02:09.807313 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode685bb75_4a24_4b83_8385_e6f5bdc9a526.slice/crio-858cb8b3b36334f1e548404f19e45ddf80896f2e702e9bd310fb37495f3be387 WatchSource:0}: Error finding container 858cb8b3b36334f1e548404f19e45ddf80896f2e702e9bd310fb37495f3be387: Status 404 returned error can't find the container with id 858cb8b3b36334f1e548404f19e45ddf80896f2e702e9bd310fb37495f3be387 Feb 18 01:02:09 crc kubenswrapper[4791]: I0218 01:02:09.814876 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-1"] Feb 18 01:02:10 crc kubenswrapper[4791]: I0218 01:02:10.545538 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sshfr" event={"ID":"dc102119-c06f-434e-af4d-c44b920538e5","Type":"ContainerStarted","Data":"80ead8ecb66d9cda2ded7e7d318b674c62a66e91b0c85685e4084bb06c68dd29"} Feb 18 01:02:10 crc kubenswrapper[4791]: I0218 01:02:10.553382 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"e685bb75-4a24-4b83-8385-e6f5bdc9a526","Type":"ContainerStarted","Data":"858cb8b3b36334f1e548404f19e45ddf80896f2e702e9bd310fb37495f3be387"} Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.416360 4791 util.go:48] "No ready sandbox for pod can be 
found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-4x5dt" Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.468573 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5g46c\" (UniqueName: \"kubernetes.io/projected/25084610-eb17-4a22-bb76-3b67b38e4402-kube-api-access-5g46c\") pod \"25084610-eb17-4a22-bb76-3b67b38e4402\" (UID: \"25084610-eb17-4a22-bb76-3b67b38e4402\") " Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.469385 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25084610-eb17-4a22-bb76-3b67b38e4402-repo-setup-combined-ca-bundle\") pod \"25084610-eb17-4a22-bb76-3b67b38e4402\" (UID: \"25084610-eb17-4a22-bb76-3b67b38e4402\") " Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.469457 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/25084610-eb17-4a22-bb76-3b67b38e4402-inventory\") pod \"25084610-eb17-4a22-bb76-3b67b38e4402\" (UID: \"25084610-eb17-4a22-bb76-3b67b38e4402\") " Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.470471 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/25084610-eb17-4a22-bb76-3b67b38e4402-ssh-key-openstack-edpm-ipam\") pod \"25084610-eb17-4a22-bb76-3b67b38e4402\" (UID: \"25084610-eb17-4a22-bb76-3b67b38e4402\") " Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.475014 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25084610-eb17-4a22-bb76-3b67b38e4402-kube-api-access-5g46c" (OuterVolumeSpecName: "kube-api-access-5g46c") pod "25084610-eb17-4a22-bb76-3b67b38e4402" (UID: "25084610-eb17-4a22-bb76-3b67b38e4402"). InnerVolumeSpecName "kube-api-access-5g46c". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.483068 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5g46c\" (UniqueName: \"kubernetes.io/projected/25084610-eb17-4a22-bb76-3b67b38e4402-kube-api-access-5g46c\") on node \"crc\" DevicePath \"\"" Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.506950 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25084610-eb17-4a22-bb76-3b67b38e4402-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "25084610-eb17-4a22-bb76-3b67b38e4402" (UID: "25084610-eb17-4a22-bb76-3b67b38e4402"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.523343 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25084610-eb17-4a22-bb76-3b67b38e4402-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "25084610-eb17-4a22-bb76-3b67b38e4402" (UID: "25084610-eb17-4a22-bb76-3b67b38e4402"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.566578 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25084610-eb17-4a22-bb76-3b67b38e4402-inventory" (OuterVolumeSpecName: "inventory") pod "25084610-eb17-4a22-bb76-3b67b38e4402" (UID: "25084610-eb17-4a22-bb76-3b67b38e4402"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.575044 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-4x5dt" event={"ID":"25084610-eb17-4a22-bb76-3b67b38e4402","Type":"ContainerDied","Data":"ab9b55ca653bdf594f88004aba74573cbf3462f048831c20c5ac69954148b45f"} Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.575081 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-4x5dt" Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.575085 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ab9b55ca653bdf594f88004aba74573cbf3462f048831c20c5ac69954148b45f" Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.576891 4791 generic.go:334] "Generic (PLEG): container finished" podID="dc102119-c06f-434e-af4d-c44b920538e5" containerID="80ead8ecb66d9cda2ded7e7d318b674c62a66e91b0c85685e4084bb06c68dd29" exitCode=0 Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.576949 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sshfr" event={"ID":"dc102119-c06f-434e-af4d-c44b920538e5","Type":"ContainerDied","Data":"80ead8ecb66d9cda2ded7e7d318b674c62a66e91b0c85685e4084bb06c68dd29"} Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.585170 4791 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25084610-eb17-4a22-bb76-3b67b38e4402-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.585437 4791 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/25084610-eb17-4a22-bb76-3b67b38e4402-inventory\") on node \"crc\" DevicePath \"\"" Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.585450 4791 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/25084610-eb17-4a22-bb76-3b67b38e4402-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.589396 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"e685bb75-4a24-4b83-8385-e6f5bdc9a526","Type":"ContainerStarted","Data":"99b69186f2e2e9c0540d2ab2c79873a7758efd3302ec0a319563025c9dfd68f4"} Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.655906 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-wzr92"] Feb 18 01:02:11 crc kubenswrapper[4791]: E0218 01:02:11.656523 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25084610-eb17-4a22-bb76-3b67b38e4402" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.656550 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="25084610-eb17-4a22-bb76-3b67b38e4402" 
containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.656865 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="25084610-eb17-4a22-bb76-3b67b38e4402" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.657858 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wzr92" Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.661861 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.663471 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.663801 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.664065 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-qjdk4" Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.684300 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-wzr92"] Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.687804 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/52c76fd4-a538-4f07-ad60-4c3beb4490f7-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-wzr92\" (UID: \"52c76fd4-a538-4f07-ad60-4c3beb4490f7\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wzr92" Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.687877 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/52c76fd4-a538-4f07-ad60-4c3beb4490f7-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-wzr92\" (UID: \"52c76fd4-a538-4f07-ad60-4c3beb4490f7\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wzr92" Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.688107 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sgzlh\" (UniqueName: \"kubernetes.io/projected/52c76fd4-a538-4f07-ad60-4c3beb4490f7-kube-api-access-sgzlh\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-wzr92\" (UID: \"52c76fd4-a538-4f07-ad60-4c3beb4490f7\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wzr92" Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.790011 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sgzlh\" (UniqueName: \"kubernetes.io/projected/52c76fd4-a538-4f07-ad60-4c3beb4490f7-kube-api-access-sgzlh\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-wzr92\" (UID: \"52c76fd4-a538-4f07-ad60-4c3beb4490f7\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wzr92" Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.790125 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/52c76fd4-a538-4f07-ad60-4c3beb4490f7-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-wzr92\" 
(UID: \"52c76fd4-a538-4f07-ad60-4c3beb4490f7\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wzr92" Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.790208 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/52c76fd4-a538-4f07-ad60-4c3beb4490f7-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-wzr92\" (UID: \"52c76fd4-a538-4f07-ad60-4c3beb4490f7\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wzr92" Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.794576 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/52c76fd4-a538-4f07-ad60-4c3beb4490f7-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-wzr92\" (UID: \"52c76fd4-a538-4f07-ad60-4c3beb4490f7\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wzr92" Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.798590 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/52c76fd4-a538-4f07-ad60-4c3beb4490f7-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-wzr92\" (UID: \"52c76fd4-a538-4f07-ad60-4c3beb4490f7\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wzr92" Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.809536 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sgzlh\" (UniqueName: \"kubernetes.io/projected/52c76fd4-a538-4f07-ad60-4c3beb4490f7-kube-api-access-sgzlh\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-wzr92\" (UID: \"52c76fd4-a538-4f07-ad60-4c3beb4490f7\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wzr92" Feb 18 01:02:11 crc kubenswrapper[4791]: I0218 01:02:11.992389 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wzr92" Feb 18 01:02:12 crc kubenswrapper[4791]: E0218 01:02:12.062879 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:02:12 crc kubenswrapper[4791]: I0218 01:02:12.602631 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sshfr" event={"ID":"dc102119-c06f-434e-af4d-c44b920538e5","Type":"ContainerStarted","Data":"7b9a69e72abd3dccc504ff3a89adfb40c188ed9026bcb019ce6841be92b5d64d"} Feb 18 01:02:12 crc kubenswrapper[4791]: I0218 01:02:12.615035 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-wzr92"] Feb 18 01:02:13 crc kubenswrapper[4791]: I0218 01:02:13.614925 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wzr92" event={"ID":"52c76fd4-a538-4f07-ad60-4c3beb4490f7","Type":"ContainerStarted","Data":"4a0836f3f5f5288777b391091a88e7a4207903bbccf2818cc00e8eec404bdf00"} Feb 18 01:02:13 crc kubenswrapper[4791]: I0218 01:02:13.615520 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wzr92" event={"ID":"52c76fd4-a538-4f07-ad60-4c3beb4490f7","Type":"ContainerStarted","Data":"f8b1f03929754911df690e34160c6584b2897f2aa5d73f086d2c21ae43ea2ab6"} Feb 18 01:02:13 crc kubenswrapper[4791]: I0218 01:02:13.633014 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-sshfr" podStartSLOduration=4.119829738 podStartE2EDuration="7.632994637s" podCreationTimestamp="2026-02-18 01:02:06 +0000 UTC" firstStartedPulling="2026-02-18 01:02:08.500525525 +0000 UTC m=+1670.068538735" lastFinishedPulling="2026-02-18 01:02:12.013690464 +0000 UTC m=+1673.581703634" observedRunningTime="2026-02-18 01:02:12.634669931 +0000 UTC m=+1674.202683101" watchObservedRunningTime="2026-02-18 01:02:13.632994637 +0000 UTC m=+1675.201007807" Feb 18 01:02:13 crc kubenswrapper[4791]: I0218 01:02:13.633532 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wzr92" podStartSLOduration=2.1064206 podStartE2EDuration="2.633527544s" podCreationTimestamp="2026-02-18 01:02:11 +0000 UTC" firstStartedPulling="2026-02-18 01:02:12.622009388 +0000 UTC m=+1674.190022558" lastFinishedPulling="2026-02-18 01:02:13.149116332 +0000 UTC m=+1674.717129502" observedRunningTime="2026-02-18 01:02:13.630742908 +0000 UTC m=+1675.198756098" watchObservedRunningTime="2026-02-18 01:02:13.633527544 +0000 UTC m=+1675.201540714" Feb 18 01:02:15 crc kubenswrapper[4791]: I0218 01:02:15.149958 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-8c989" Feb 18 01:02:15 crc kubenswrapper[4791]: I0218 01:02:15.207281 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8c989"] Feb 18 01:02:15 crc kubenswrapper[4791]: I0218 01:02:15.634995 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-8c989" podUID="1bb93696-a43f-46ef-a640-cb34d7607c6f" 
containerName="registry-server" containerID="cri-o://06d82fed01aa9c4823be63cb544ed0ccd15dd6e55eadb5f17a2222847950a30e" gracePeriod=2 Feb 18 01:02:16 crc kubenswrapper[4791]: I0218 01:02:16.130940 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8c989" Feb 18 01:02:16 crc kubenswrapper[4791]: I0218 01:02:16.306265 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qzchs\" (UniqueName: \"kubernetes.io/projected/1bb93696-a43f-46ef-a640-cb34d7607c6f-kube-api-access-qzchs\") pod \"1bb93696-a43f-46ef-a640-cb34d7607c6f\" (UID: \"1bb93696-a43f-46ef-a640-cb34d7607c6f\") " Feb 18 01:02:16 crc kubenswrapper[4791]: I0218 01:02:16.306578 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1bb93696-a43f-46ef-a640-cb34d7607c6f-catalog-content\") pod \"1bb93696-a43f-46ef-a640-cb34d7607c6f\" (UID: \"1bb93696-a43f-46ef-a640-cb34d7607c6f\") " Feb 18 01:02:16 crc kubenswrapper[4791]: I0218 01:02:16.306651 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1bb93696-a43f-46ef-a640-cb34d7607c6f-utilities\") pod \"1bb93696-a43f-46ef-a640-cb34d7607c6f\" (UID: \"1bb93696-a43f-46ef-a640-cb34d7607c6f\") " Feb 18 01:02:16 crc kubenswrapper[4791]: I0218 01:02:16.307342 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1bb93696-a43f-46ef-a640-cb34d7607c6f-utilities" (OuterVolumeSpecName: "utilities") pod "1bb93696-a43f-46ef-a640-cb34d7607c6f" (UID: "1bb93696-a43f-46ef-a640-cb34d7607c6f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:02:16 crc kubenswrapper[4791]: I0218 01:02:16.311495 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bb93696-a43f-46ef-a640-cb34d7607c6f-kube-api-access-qzchs" (OuterVolumeSpecName: "kube-api-access-qzchs") pod "1bb93696-a43f-46ef-a640-cb34d7607c6f" (UID: "1bb93696-a43f-46ef-a640-cb34d7607c6f"). InnerVolumeSpecName "kube-api-access-qzchs". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:02:16 crc kubenswrapper[4791]: I0218 01:02:16.352248 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1bb93696-a43f-46ef-a640-cb34d7607c6f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1bb93696-a43f-46ef-a640-cb34d7607c6f" (UID: "1bb93696-a43f-46ef-a640-cb34d7607c6f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:02:16 crc kubenswrapper[4791]: I0218 01:02:16.408805 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1bb93696-a43f-46ef-a640-cb34d7607c6f-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 01:02:16 crc kubenswrapper[4791]: I0218 01:02:16.408836 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1bb93696-a43f-46ef-a640-cb34d7607c6f-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 01:02:16 crc kubenswrapper[4791]: I0218 01:02:16.408846 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qzchs\" (UniqueName: \"kubernetes.io/projected/1bb93696-a43f-46ef-a640-cb34d7607c6f-kube-api-access-qzchs\") on node \"crc\" DevicePath \"\"" Feb 18 01:02:16 crc kubenswrapper[4791]: I0218 01:02:16.645516 4791 generic.go:334] "Generic (PLEG): container finished" podID="52c76fd4-a538-4f07-ad60-4c3beb4490f7" containerID="4a0836f3f5f5288777b391091a88e7a4207903bbccf2818cc00e8eec404bdf00" exitCode=0 Feb 18 01:02:16 crc kubenswrapper[4791]: I0218 01:02:16.645612 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wzr92" event={"ID":"52c76fd4-a538-4f07-ad60-4c3beb4490f7","Type":"ContainerDied","Data":"4a0836f3f5f5288777b391091a88e7a4207903bbccf2818cc00e8eec404bdf00"} Feb 18 01:02:16 crc kubenswrapper[4791]: I0218 01:02:16.648387 4791 generic.go:334] "Generic (PLEG): container finished" podID="1bb93696-a43f-46ef-a640-cb34d7607c6f" containerID="06d82fed01aa9c4823be63cb544ed0ccd15dd6e55eadb5f17a2222847950a30e" exitCode=0 Feb 18 01:02:16 crc kubenswrapper[4791]: I0218 01:02:16.648436 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8c989" event={"ID":"1bb93696-a43f-46ef-a640-cb34d7607c6f","Type":"ContainerDied","Data":"06d82fed01aa9c4823be63cb544ed0ccd15dd6e55eadb5f17a2222847950a30e"} Feb 18 01:02:16 crc kubenswrapper[4791]: I0218 01:02:16.648457 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8c989" Feb 18 01:02:16 crc kubenswrapper[4791]: I0218 01:02:16.648472 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8c989" event={"ID":"1bb93696-a43f-46ef-a640-cb34d7607c6f","Type":"ContainerDied","Data":"5297f81f557290c7012b177d32cde7642984239c6ef7e0b1e6e6ccf906221fcc"} Feb 18 01:02:16 crc kubenswrapper[4791]: I0218 01:02:16.648489 4791 scope.go:117] "RemoveContainer" containerID="06d82fed01aa9c4823be63cb544ed0ccd15dd6e55eadb5f17a2222847950a30e" Feb 18 01:02:16 crc kubenswrapper[4791]: I0218 01:02:16.683974 4791 scope.go:117] "RemoveContainer" containerID="bd2e3f0647b4b0a5a4e8171588417169e3096c8da43b268f9b6fbbbf0d8f83c5" Feb 18 01:02:16 crc kubenswrapper[4791]: I0218 01:02:16.690485 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8c989"] Feb 18 01:02:16 crc kubenswrapper[4791]: I0218 01:02:16.702138 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-8c989"] Feb 18 01:02:16 crc kubenswrapper[4791]: I0218 01:02:16.709950 4791 scope.go:117] "RemoveContainer" containerID="2bb8a8e68a4b2e6cda1a81b35b878864b32424589b94616ba82ed532a6445883" Feb 18 01:02:16 crc kubenswrapper[4791]: I0218 01:02:16.806623 4791 scope.go:117] "RemoveContainer" containerID="06d82fed01aa9c4823be63cb544ed0ccd15dd6e55eadb5f17a2222847950a30e" Feb 18 01:02:16 crc kubenswrapper[4791]: E0218 01:02:16.807342 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"06d82fed01aa9c4823be63cb544ed0ccd15dd6e55eadb5f17a2222847950a30e\": container with ID starting with 06d82fed01aa9c4823be63cb544ed0ccd15dd6e55eadb5f17a2222847950a30e not found: ID does not exist" containerID="06d82fed01aa9c4823be63cb544ed0ccd15dd6e55eadb5f17a2222847950a30e" Feb 18 01:02:16 crc kubenswrapper[4791]: I0218 01:02:16.807461 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"06d82fed01aa9c4823be63cb544ed0ccd15dd6e55eadb5f17a2222847950a30e"} err="failed to get container status \"06d82fed01aa9c4823be63cb544ed0ccd15dd6e55eadb5f17a2222847950a30e\": rpc error: code = NotFound desc = could not find container \"06d82fed01aa9c4823be63cb544ed0ccd15dd6e55eadb5f17a2222847950a30e\": container with ID starting with 06d82fed01aa9c4823be63cb544ed0ccd15dd6e55eadb5f17a2222847950a30e not found: ID does not exist" Feb 18 01:02:16 crc kubenswrapper[4791]: I0218 01:02:16.807591 4791 scope.go:117] "RemoveContainer" containerID="bd2e3f0647b4b0a5a4e8171588417169e3096c8da43b268f9b6fbbbf0d8f83c5" Feb 18 01:02:16 crc kubenswrapper[4791]: E0218 01:02:16.808044 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bd2e3f0647b4b0a5a4e8171588417169e3096c8da43b268f9b6fbbbf0d8f83c5\": container with ID starting with bd2e3f0647b4b0a5a4e8171588417169e3096c8da43b268f9b6fbbbf0d8f83c5 not found: ID does not exist" containerID="bd2e3f0647b4b0a5a4e8171588417169e3096c8da43b268f9b6fbbbf0d8f83c5" Feb 18 01:02:16 crc kubenswrapper[4791]: I0218 01:02:16.808087 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd2e3f0647b4b0a5a4e8171588417169e3096c8da43b268f9b6fbbbf0d8f83c5"} err="failed to get container status \"bd2e3f0647b4b0a5a4e8171588417169e3096c8da43b268f9b6fbbbf0d8f83c5\": rpc error: code = NotFound desc = could not find 
container \"bd2e3f0647b4b0a5a4e8171588417169e3096c8da43b268f9b6fbbbf0d8f83c5\": container with ID starting with bd2e3f0647b4b0a5a4e8171588417169e3096c8da43b268f9b6fbbbf0d8f83c5 not found: ID does not exist" Feb 18 01:02:16 crc kubenswrapper[4791]: I0218 01:02:16.808115 4791 scope.go:117] "RemoveContainer" containerID="2bb8a8e68a4b2e6cda1a81b35b878864b32424589b94616ba82ed532a6445883" Feb 18 01:02:16 crc kubenswrapper[4791]: E0218 01:02:16.808704 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2bb8a8e68a4b2e6cda1a81b35b878864b32424589b94616ba82ed532a6445883\": container with ID starting with 2bb8a8e68a4b2e6cda1a81b35b878864b32424589b94616ba82ed532a6445883 not found: ID does not exist" containerID="2bb8a8e68a4b2e6cda1a81b35b878864b32424589b94616ba82ed532a6445883" Feb 18 01:02:16 crc kubenswrapper[4791]: I0218 01:02:16.808765 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2bb8a8e68a4b2e6cda1a81b35b878864b32424589b94616ba82ed532a6445883"} err="failed to get container status \"2bb8a8e68a4b2e6cda1a81b35b878864b32424589b94616ba82ed532a6445883\": rpc error: code = NotFound desc = could not find container \"2bb8a8e68a4b2e6cda1a81b35b878864b32424589b94616ba82ed532a6445883\": container with ID starting with 2bb8a8e68a4b2e6cda1a81b35b878864b32424589b94616ba82ed532a6445883 not found: ID does not exist" Feb 18 01:02:17 crc kubenswrapper[4791]: I0218 01:02:17.072920 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bb93696-a43f-46ef-a640-cb34d7607c6f" path="/var/lib/kubelet/pods/1bb93696-a43f-46ef-a640-cb34d7607c6f/volumes" Feb 18 01:02:17 crc kubenswrapper[4791]: I0218 01:02:17.166880 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-sshfr" Feb 18 01:02:17 crc kubenswrapper[4791]: I0218 01:02:17.167139 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-sshfr" Feb 18 01:02:17 crc kubenswrapper[4791]: I0218 01:02:17.223631 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-sshfr" Feb 18 01:02:17 crc kubenswrapper[4791]: I0218 01:02:17.716104 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-sshfr" Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.221377 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wzr92" Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.282354 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/52c76fd4-a538-4f07-ad60-4c3beb4490f7-inventory\") pod \"52c76fd4-a538-4f07-ad60-4c3beb4490f7\" (UID: \"52c76fd4-a538-4f07-ad60-4c3beb4490f7\") " Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.282441 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sgzlh\" (UniqueName: \"kubernetes.io/projected/52c76fd4-a538-4f07-ad60-4c3beb4490f7-kube-api-access-sgzlh\") pod \"52c76fd4-a538-4f07-ad60-4c3beb4490f7\" (UID: \"52c76fd4-a538-4f07-ad60-4c3beb4490f7\") " Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.282668 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/52c76fd4-a538-4f07-ad60-4c3beb4490f7-ssh-key-openstack-edpm-ipam\") pod \"52c76fd4-a538-4f07-ad60-4c3beb4490f7\" (UID: \"52c76fd4-a538-4f07-ad60-4c3beb4490f7\") " Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.287688 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52c76fd4-a538-4f07-ad60-4c3beb4490f7-kube-api-access-sgzlh" (OuterVolumeSpecName: "kube-api-access-sgzlh") pod "52c76fd4-a538-4f07-ad60-4c3beb4490f7" (UID: "52c76fd4-a538-4f07-ad60-4c3beb4490f7"). InnerVolumeSpecName "kube-api-access-sgzlh". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.314317 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52c76fd4-a538-4f07-ad60-4c3beb4490f7-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "52c76fd4-a538-4f07-ad60-4c3beb4490f7" (UID: "52c76fd4-a538-4f07-ad60-4c3beb4490f7"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.315575 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52c76fd4-a538-4f07-ad60-4c3beb4490f7-inventory" (OuterVolumeSpecName: "inventory") pod "52c76fd4-a538-4f07-ad60-4c3beb4490f7" (UID: "52c76fd4-a538-4f07-ad60-4c3beb4490f7"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.384470 4791 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/52c76fd4-a538-4f07-ad60-4c3beb4490f7-inventory\") on node \"crc\" DevicePath \"\"" Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.384501 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sgzlh\" (UniqueName: \"kubernetes.io/projected/52c76fd4-a538-4f07-ad60-4c3beb4490f7-kube-api-access-sgzlh\") on node \"crc\" DevicePath \"\"" Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.384511 4791 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/52c76fd4-a538-4f07-ad60-4c3beb4490f7-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.674818 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wzr92" event={"ID":"52c76fd4-a538-4f07-ad60-4c3beb4490f7","Type":"ContainerDied","Data":"f8b1f03929754911df690e34160c6584b2897f2aa5d73f086d2c21ae43ea2ab6"} Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.674910 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f8b1f03929754911df690e34160c6584b2897f2aa5d73f086d2c21ae43ea2ab6" Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.674868 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wzr92" Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.746588 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-fgcg2"] Feb 18 01:02:18 crc kubenswrapper[4791]: E0218 01:02:18.747132 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1bb93696-a43f-46ef-a640-cb34d7607c6f" containerName="extract-utilities" Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.747151 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="1bb93696-a43f-46ef-a640-cb34d7607c6f" containerName="extract-utilities" Feb 18 01:02:18 crc kubenswrapper[4791]: E0218 01:02:18.747211 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1bb93696-a43f-46ef-a640-cb34d7607c6f" containerName="registry-server" Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.747218 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="1bb93696-a43f-46ef-a640-cb34d7607c6f" containerName="registry-server" Feb 18 01:02:18 crc kubenswrapper[4791]: E0218 01:02:18.747226 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1bb93696-a43f-46ef-a640-cb34d7607c6f" containerName="extract-content" Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.747232 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="1bb93696-a43f-46ef-a640-cb34d7607c6f" containerName="extract-content" Feb 18 01:02:18 crc kubenswrapper[4791]: E0218 01:02:18.747245 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52c76fd4-a538-4f07-ad60-4c3beb4490f7" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.747252 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="52c76fd4-a538-4f07-ad60-4c3beb4490f7" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.747467 4791 
memory_manager.go:354] "RemoveStaleState removing state" podUID="1bb93696-a43f-46ef-a640-cb34d7607c6f" containerName="registry-server" Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.747486 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="52c76fd4-a538-4f07-ad60-4c3beb4490f7" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.748332 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-fgcg2" Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.760134 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.760333 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.760441 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-qjdk4" Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.760557 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.762130 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-fgcg2"] Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.795254 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21e731fb-b216-48ca-b351-c1b511ed7617-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-fgcg2\" (UID: \"21e731fb-b216-48ca-b351-c1b511ed7617\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-fgcg2" Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.795675 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/21e731fb-b216-48ca-b351-c1b511ed7617-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-fgcg2\" (UID: \"21e731fb-b216-48ca-b351-c1b511ed7617\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-fgcg2" Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.795737 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/21e731fb-b216-48ca-b351-c1b511ed7617-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-fgcg2\" (UID: \"21e731fb-b216-48ca-b351-c1b511ed7617\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-fgcg2" Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.795906 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d8tld\" (UniqueName: \"kubernetes.io/projected/21e731fb-b216-48ca-b351-c1b511ed7617-kube-api-access-d8tld\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-fgcg2\" (UID: \"21e731fb-b216-48ca-b351-c1b511ed7617\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-fgcg2" Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.897421 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d8tld\" (UniqueName: 
\"kubernetes.io/projected/21e731fb-b216-48ca-b351-c1b511ed7617-kube-api-access-d8tld\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-fgcg2\" (UID: \"21e731fb-b216-48ca-b351-c1b511ed7617\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-fgcg2" Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.897542 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21e731fb-b216-48ca-b351-c1b511ed7617-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-fgcg2\" (UID: \"21e731fb-b216-48ca-b351-c1b511ed7617\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-fgcg2" Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.897664 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/21e731fb-b216-48ca-b351-c1b511ed7617-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-fgcg2\" (UID: \"21e731fb-b216-48ca-b351-c1b511ed7617\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-fgcg2" Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.897684 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/21e731fb-b216-48ca-b351-c1b511ed7617-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-fgcg2\" (UID: \"21e731fb-b216-48ca-b351-c1b511ed7617\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-fgcg2" Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.903597 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/21e731fb-b216-48ca-b351-c1b511ed7617-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-fgcg2\" (UID: \"21e731fb-b216-48ca-b351-c1b511ed7617\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-fgcg2" Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.904520 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/21e731fb-b216-48ca-b351-c1b511ed7617-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-fgcg2\" (UID: \"21e731fb-b216-48ca-b351-c1b511ed7617\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-fgcg2" Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.904804 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21e731fb-b216-48ca-b351-c1b511ed7617-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-fgcg2\" (UID: \"21e731fb-b216-48ca-b351-c1b511ed7617\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-fgcg2" Feb 18 01:02:18 crc kubenswrapper[4791]: I0218 01:02:18.913708 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d8tld\" (UniqueName: \"kubernetes.io/projected/21e731fb-b216-48ca-b351-c1b511ed7617-kube-api-access-d8tld\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-fgcg2\" (UID: \"21e731fb-b216-48ca-b351-c1b511ed7617\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-fgcg2" Feb 18 01:02:19 crc kubenswrapper[4791]: E0218 01:02:19.074617 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with 
ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:02:19 crc kubenswrapper[4791]: I0218 01:02:19.094583 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-qjdk4" Feb 18 01:02:19 crc kubenswrapper[4791]: I0218 01:02:19.095893 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-fgcg2" Feb 18 01:02:19 crc kubenswrapper[4791]: I0218 01:02:19.587542 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-sshfr"] Feb 18 01:02:19 crc kubenswrapper[4791]: I0218 01:02:19.835140 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-fgcg2"] Feb 18 01:02:19 crc kubenswrapper[4791]: W0218 01:02:19.838617 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod21e731fb_b216_48ca_b351_c1b511ed7617.slice/crio-233937256b1ea23b8d091a6e65ef714975209f3552ca4a37227218cc085ba56f WatchSource:0}: Error finding container 233937256b1ea23b8d091a6e65ef714975209f3552ca4a37227218cc085ba56f: Status 404 returned error can't find the container with id 233937256b1ea23b8d091a6e65ef714975209f3552ca4a37227218cc085ba56f Feb 18 01:02:20 crc kubenswrapper[4791]: I0218 01:02:20.267754 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Feb 18 01:02:20 crc kubenswrapper[4791]: I0218 01:02:20.700533 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-fgcg2" event={"ID":"21e731fb-b216-48ca-b351-c1b511ed7617","Type":"ContainerStarted","Data":"07a2b2fff4b6ff5b9d33957057f6995892b2cbc4de5cd2890a6d30a3f629c271"} Feb 18 01:02:20 crc kubenswrapper[4791]: I0218 01:02:20.700797 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-fgcg2" event={"ID":"21e731fb-b216-48ca-b351-c1b511ed7617","Type":"ContainerStarted","Data":"233937256b1ea23b8d091a6e65ef714975209f3552ca4a37227218cc085ba56f"} Feb 18 01:02:20 crc kubenswrapper[4791]: I0218 01:02:20.700683 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-sshfr" podUID="dc102119-c06f-434e-af4d-c44b920538e5" containerName="registry-server" containerID="cri-o://7b9a69e72abd3dccc504ff3a89adfb40c188ed9026bcb019ce6841be92b5d64d" gracePeriod=2 Feb 18 01:02:20 crc kubenswrapper[4791]: I0218 01:02:20.737817 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-fgcg2" podStartSLOduration=2.324410368 podStartE2EDuration="2.737795484s" podCreationTimestamp="2026-02-18 01:02:18 +0000 UTC" firstStartedPulling="2026-02-18 01:02:19.842664139 +0000 UTC m=+1681.410677319" lastFinishedPulling="2026-02-18 01:02:20.256049265 +0000 UTC m=+1681.824062435" observedRunningTime="2026-02-18 01:02:20.721956535 +0000 UTC m=+1682.289969705" watchObservedRunningTime="2026-02-18 01:02:20.737795484 +0000 UTC m=+1682.305808654" Feb 18 01:02:21 crc kubenswrapper[4791]: I0218 01:02:21.248225 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-sshfr" Feb 18 01:02:21 crc kubenswrapper[4791]: I0218 01:02:21.355527 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kxwpm\" (UniqueName: \"kubernetes.io/projected/dc102119-c06f-434e-af4d-c44b920538e5-kube-api-access-kxwpm\") pod \"dc102119-c06f-434e-af4d-c44b920538e5\" (UID: \"dc102119-c06f-434e-af4d-c44b920538e5\") " Feb 18 01:02:21 crc kubenswrapper[4791]: I0218 01:02:21.355757 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc102119-c06f-434e-af4d-c44b920538e5-utilities\") pod \"dc102119-c06f-434e-af4d-c44b920538e5\" (UID: \"dc102119-c06f-434e-af4d-c44b920538e5\") " Feb 18 01:02:21 crc kubenswrapper[4791]: I0218 01:02:21.355947 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc102119-c06f-434e-af4d-c44b920538e5-catalog-content\") pod \"dc102119-c06f-434e-af4d-c44b920538e5\" (UID: \"dc102119-c06f-434e-af4d-c44b920538e5\") " Feb 18 01:02:21 crc kubenswrapper[4791]: I0218 01:02:21.356612 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dc102119-c06f-434e-af4d-c44b920538e5-utilities" (OuterVolumeSpecName: "utilities") pod "dc102119-c06f-434e-af4d-c44b920538e5" (UID: "dc102119-c06f-434e-af4d-c44b920538e5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:02:21 crc kubenswrapper[4791]: I0218 01:02:21.361398 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc102119-c06f-434e-af4d-c44b920538e5-kube-api-access-kxwpm" (OuterVolumeSpecName: "kube-api-access-kxwpm") pod "dc102119-c06f-434e-af4d-c44b920538e5" (UID: "dc102119-c06f-434e-af4d-c44b920538e5"). InnerVolumeSpecName "kube-api-access-kxwpm". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:02:21 crc kubenswrapper[4791]: I0218 01:02:21.418494 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dc102119-c06f-434e-af4d-c44b920538e5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dc102119-c06f-434e-af4d-c44b920538e5" (UID: "dc102119-c06f-434e-af4d-c44b920538e5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:02:21 crc kubenswrapper[4791]: I0218 01:02:21.459110 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kxwpm\" (UniqueName: \"kubernetes.io/projected/dc102119-c06f-434e-af4d-c44b920538e5-kube-api-access-kxwpm\") on node \"crc\" DevicePath \"\"" Feb 18 01:02:21 crc kubenswrapper[4791]: I0218 01:02:21.459343 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc102119-c06f-434e-af4d-c44b920538e5-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 01:02:21 crc kubenswrapper[4791]: I0218 01:02:21.459413 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc102119-c06f-434e-af4d-c44b920538e5-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 01:02:21 crc kubenswrapper[4791]: I0218 01:02:21.713925 4791 generic.go:334] "Generic (PLEG): container finished" podID="dc102119-c06f-434e-af4d-c44b920538e5" containerID="7b9a69e72abd3dccc504ff3a89adfb40c188ed9026bcb019ce6841be92b5d64d" exitCode=0 Feb 18 01:02:21 crc kubenswrapper[4791]: I0218 01:02:21.713968 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sshfr" event={"ID":"dc102119-c06f-434e-af4d-c44b920538e5","Type":"ContainerDied","Data":"7b9a69e72abd3dccc504ff3a89adfb40c188ed9026bcb019ce6841be92b5d64d"} Feb 18 01:02:21 crc kubenswrapper[4791]: I0218 01:02:21.714277 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sshfr" event={"ID":"dc102119-c06f-434e-af4d-c44b920538e5","Type":"ContainerDied","Data":"99613f2af7af76f76facd8fac853b54cfd998e5d807a4a059be0cdd784514409"} Feb 18 01:02:21 crc kubenswrapper[4791]: I0218 01:02:21.714312 4791 scope.go:117] "RemoveContainer" containerID="7b9a69e72abd3dccc504ff3a89adfb40c188ed9026bcb019ce6841be92b5d64d" Feb 18 01:02:21 crc kubenswrapper[4791]: I0218 01:02:21.714030 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-sshfr" Feb 18 01:02:21 crc kubenswrapper[4791]: I0218 01:02:21.745268 4791 scope.go:117] "RemoveContainer" containerID="80ead8ecb66d9cda2ded7e7d318b674c62a66e91b0c85685e4084bb06c68dd29" Feb 18 01:02:21 crc kubenswrapper[4791]: I0218 01:02:21.776577 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-sshfr"] Feb 18 01:02:21 crc kubenswrapper[4791]: I0218 01:02:21.782540 4791 scope.go:117] "RemoveContainer" containerID="6f2775b059474db51df55e226d7c0045bb2434c4c732928e6f00c63afb4afdd6" Feb 18 01:02:21 crc kubenswrapper[4791]: I0218 01:02:21.790847 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-sshfr"] Feb 18 01:02:21 crc kubenswrapper[4791]: I0218 01:02:21.846959 4791 scope.go:117] "RemoveContainer" containerID="7b9a69e72abd3dccc504ff3a89adfb40c188ed9026bcb019ce6841be92b5d64d" Feb 18 01:02:21 crc kubenswrapper[4791]: E0218 01:02:21.847478 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7b9a69e72abd3dccc504ff3a89adfb40c188ed9026bcb019ce6841be92b5d64d\": container with ID starting with 7b9a69e72abd3dccc504ff3a89adfb40c188ed9026bcb019ce6841be92b5d64d not found: ID does not exist" containerID="7b9a69e72abd3dccc504ff3a89adfb40c188ed9026bcb019ce6841be92b5d64d" Feb 18 01:02:21 crc kubenswrapper[4791]: I0218 01:02:21.847514 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b9a69e72abd3dccc504ff3a89adfb40c188ed9026bcb019ce6841be92b5d64d"} err="failed to get container status \"7b9a69e72abd3dccc504ff3a89adfb40c188ed9026bcb019ce6841be92b5d64d\": rpc error: code = NotFound desc = could not find container \"7b9a69e72abd3dccc504ff3a89adfb40c188ed9026bcb019ce6841be92b5d64d\": container with ID starting with 7b9a69e72abd3dccc504ff3a89adfb40c188ed9026bcb019ce6841be92b5d64d not found: ID does not exist" Feb 18 01:02:21 crc kubenswrapper[4791]: I0218 01:02:21.847534 4791 scope.go:117] "RemoveContainer" containerID="80ead8ecb66d9cda2ded7e7d318b674c62a66e91b0c85685e4084bb06c68dd29" Feb 18 01:02:21 crc kubenswrapper[4791]: E0218 01:02:21.847936 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"80ead8ecb66d9cda2ded7e7d318b674c62a66e91b0c85685e4084bb06c68dd29\": container with ID starting with 80ead8ecb66d9cda2ded7e7d318b674c62a66e91b0c85685e4084bb06c68dd29 not found: ID does not exist" containerID="80ead8ecb66d9cda2ded7e7d318b674c62a66e91b0c85685e4084bb06c68dd29" Feb 18 01:02:21 crc kubenswrapper[4791]: I0218 01:02:21.847979 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"80ead8ecb66d9cda2ded7e7d318b674c62a66e91b0c85685e4084bb06c68dd29"} err="failed to get container status \"80ead8ecb66d9cda2ded7e7d318b674c62a66e91b0c85685e4084bb06c68dd29\": rpc error: code = NotFound desc = could not find container \"80ead8ecb66d9cda2ded7e7d318b674c62a66e91b0c85685e4084bb06c68dd29\": container with ID starting with 80ead8ecb66d9cda2ded7e7d318b674c62a66e91b0c85685e4084bb06c68dd29 not found: ID does not exist" Feb 18 01:02:21 crc kubenswrapper[4791]: I0218 01:02:21.848008 4791 scope.go:117] "RemoveContainer" containerID="6f2775b059474db51df55e226d7c0045bb2434c4c732928e6f00c63afb4afdd6" Feb 18 01:02:21 crc kubenswrapper[4791]: E0218 01:02:21.848513 4791 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"6f2775b059474db51df55e226d7c0045bb2434c4c732928e6f00c63afb4afdd6\": container with ID starting with 6f2775b059474db51df55e226d7c0045bb2434c4c732928e6f00c63afb4afdd6 not found: ID does not exist" containerID="6f2775b059474db51df55e226d7c0045bb2434c4c732928e6f00c63afb4afdd6" Feb 18 01:02:21 crc kubenswrapper[4791]: I0218 01:02:21.848536 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f2775b059474db51df55e226d7c0045bb2434c4c732928e6f00c63afb4afdd6"} err="failed to get container status \"6f2775b059474db51df55e226d7c0045bb2434c4c732928e6f00c63afb4afdd6\": rpc error: code = NotFound desc = could not find container \"6f2775b059474db51df55e226d7c0045bb2434c4c732928e6f00c63afb4afdd6\": container with ID starting with 6f2775b059474db51df55e226d7c0045bb2434c4c732928e6f00c63afb4afdd6 not found: ID does not exist" Feb 18 01:02:23 crc kubenswrapper[4791]: I0218 01:02:23.078762 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc102119-c06f-434e-af4d-c44b920538e5" path="/var/lib/kubelet/pods/dc102119-c06f-434e-af4d-c44b920538e5/volumes" Feb 18 01:02:23 crc kubenswrapper[4791]: E0218 01:02:23.161742 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 01:02:23 crc kubenswrapper[4791]: E0218 01:02:23.161821 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 01:02:23 crc kubenswrapper[4791]: E0218 01:02:23.161973 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lgn7d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-scphk_openstack(68e5e8d6-5771-4045-858a-4a39b2db99f9): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 18 01:02:23 crc kubenswrapper[4791]: E0218 01:02:23.163207 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:02:26 crc kubenswrapper[4791]: I0218 01:02:26.799769 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 01:02:26 crc kubenswrapper[4791]: I0218 01:02:26.800318 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 01:02:26 crc kubenswrapper[4791]: I0218 01:02:26.800358 4791 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" Feb 18 01:02:26 crc kubenswrapper[4791]: I0218 01:02:26.801186 4791 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"96581330093c622b698c7dcf5f56174d5a68f6da90cd5ff685869e2dfabfd24a"} pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 18 01:02:26 crc kubenswrapper[4791]: I0218 01:02:26.801240 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" containerID="cri-o://96581330093c622b698c7dcf5f56174d5a68f6da90cd5ff685869e2dfabfd24a" gracePeriod=600 Feb 18 01:02:26 crc kubenswrapper[4791]: E0218 01:02:26.944769 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:02:27 crc kubenswrapper[4791]: I0218 01:02:27.808047 4791 generic.go:334] "Generic (PLEG): container finished" podID="0b31a333-8f95-459c-8135-e91e557c4c85" containerID="96581330093c622b698c7dcf5f56174d5a68f6da90cd5ff685869e2dfabfd24a" exitCode=0 Feb 18 01:02:27 crc kubenswrapper[4791]: I0218 01:02:27.808091 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerDied","Data":"96581330093c622b698c7dcf5f56174d5a68f6da90cd5ff685869e2dfabfd24a"} Feb 18 01:02:27 crc kubenswrapper[4791]: I0218 01:02:27.808123 4791 scope.go:117] "RemoveContainer" containerID="9c99f3d26f7a57737b6a7ef3e614fe415cd8c65f3089255fc52b1cd99b1db166" Feb 18 01:02:27 crc kubenswrapper[4791]: I0218 01:02:27.808889 4791 scope.go:117] "RemoveContainer" containerID="96581330093c622b698c7dcf5f56174d5a68f6da90cd5ff685869e2dfabfd24a" Feb 18 01:02:27 crc kubenswrapper[4791]: E0218 01:02:27.809447 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:02:28 crc kubenswrapper[4791]: I0218 01:02:28.381878 4791 scope.go:117] "RemoveContainer" containerID="d28a2829a5f749af05c7386c584f40092b30f5afe8fb16c9c4f6c4e75db990c8" Feb 18 01:02:28 crc kubenswrapper[4791]: I0218 01:02:28.405757 4791 scope.go:117] "RemoveContainer" containerID="089bc538267743e9c2f9f7ead5b7b40d8d569980c326096728d6791f30f5df0a" Feb 18 01:02:28 crc kubenswrapper[4791]: I0218 01:02:28.466947 4791 scope.go:117] "RemoveContainer" containerID="c7d22ab55eda80d0c19e034ae89783c0e11b480e209f4e9a870fad65b3943224" Feb 18 01:02:28 crc kubenswrapper[4791]: I0218 01:02:28.532033 4791 scope.go:117] "RemoveContainer" containerID="5367edaf1f83dc270c9d689e0ceaf3e267f32356c387e9259f2b46e5292ccedc" Feb 18 01:02:28 crc kubenswrapper[4791]: I0218 01:02:28.588491 4791 scope.go:117] "RemoveContainer" containerID="61d1139739e53f052f116d30768f8dd16ba921eb312a90b1b4b3c0a05f784bd1" Feb 18 01:02:28 crc kubenswrapper[4791]: I0218 01:02:28.644004 4791 scope.go:117] "RemoveContainer" containerID="e1dfe2a5310805d76494d6704dc8f2dcbf23c7e485dcdfafdded666cca69479f" Feb 18 01:02:34 crc kubenswrapper[4791]: E0218 01:02:34.065378 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:02:36 crc kubenswrapper[4791]: E0218 01:02:36.063386 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:02:42 crc kubenswrapper[4791]: I0218 01:02:42.061837 4791 scope.go:117] "RemoveContainer" containerID="96581330093c622b698c7dcf5f56174d5a68f6da90cd5ff685869e2dfabfd24a" Feb 18 01:02:42 crc kubenswrapper[4791]: E0218 01:02:42.062703 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:02:44 crc kubenswrapper[4791]: I0218 01:02:44.050311 4791 generic.go:334] "Generic (PLEG): container finished" podID="e685bb75-4a24-4b83-8385-e6f5bdc9a526" containerID="99b69186f2e2e9c0540d2ab2c79873a7758efd3302ec0a319563025c9dfd68f4" exitCode=0 Feb 18 01:02:44 crc kubenswrapper[4791]: I0218 01:02:44.050400 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"e685bb75-4a24-4b83-8385-e6f5bdc9a526","Type":"ContainerDied","Data":"99b69186f2e2e9c0540d2ab2c79873a7758efd3302ec0a319563025c9dfd68f4"} Feb 18 01:02:45 crc kubenswrapper[4791]: I0218 01:02:45.074431 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" 
event={"ID":"e685bb75-4a24-4b83-8385-e6f5bdc9a526","Type":"ContainerStarted","Data":"9435265ee63c2fe0fba838f99be9f3ffefbdb32e8d43d7f0d78ad445059dc79b"} Feb 18 01:02:45 crc kubenswrapper[4791]: I0218 01:02:45.074954 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-1" Feb 18 01:02:45 crc kubenswrapper[4791]: I0218 01:02:45.130582 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-1" podStartSLOduration=37.130561403 podStartE2EDuration="37.130561403s" podCreationTimestamp="2026-02-18 01:02:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 01:02:45.117946324 +0000 UTC m=+1706.685959494" watchObservedRunningTime="2026-02-18 01:02:45.130561403 +0000 UTC m=+1706.698574573" Feb 18 01:02:47 crc kubenswrapper[4791]: E0218 01:02:47.063289 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:02:47 crc kubenswrapper[4791]: E0218 01:02:47.064244 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:02:54 crc kubenswrapper[4791]: I0218 01:02:54.061647 4791 scope.go:117] "RemoveContainer" containerID="96581330093c622b698c7dcf5f56174d5a68f6da90cd5ff685869e2dfabfd24a" Feb 18 01:02:54 crc kubenswrapper[4791]: E0218 01:02:54.062547 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:02:59 crc kubenswrapper[4791]: E0218 01:02:59.076712 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:02:59 crc kubenswrapper[4791]: I0218 01:02:59.205314 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-1" Feb 18 01:02:59 crc kubenswrapper[4791]: I0218 01:02:59.267189 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Feb 18 01:03:02 crc kubenswrapper[4791]: E0218 01:03:02.184634 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 01:03:02 crc kubenswrapper[4791]: E0218 01:03:02.185249 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 01:03:02 crc kubenswrapper[4791]: E0218 01:03:02.185408 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndh699h566hcch79h59h595h58bhb6h7fh594h67fh546h668h56hc8h6chd5h94h599h587hddh64bh57bhc9h68fh7dh57bh65dh64fh68dh59dq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-np7gr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(4b9cec47-aeda-40f0-b83e-46f09ce65e95): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Feb 18 01:03:02 crc kubenswrapper[4791]: E0218 01:03:02.186606 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:03:03 crc kubenswrapper[4791]: I0218 01:03:03.484903 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="10fd526e-f41c-4c4a-8e15-239cd3ac37da" containerName="rabbitmq" containerID="cri-o://87dbe269e6976b92cf76cf29ccc6023b76915423a9f99644c462783a776b728f" gracePeriod=604796 Feb 18 01:03:04 crc kubenswrapper[4791]: I0218 01:03:04.773975 4791 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="10fd526e-f41c-4c4a-8e15-239cd3ac37da" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.129:5671: connect: connection refused" Feb 18 01:03:06 crc kubenswrapper[4791]: I0218 01:03:06.062206 4791 scope.go:117] "RemoveContainer" containerID="96581330093c622b698c7dcf5f56174d5a68f6da90cd5ff685869e2dfabfd24a" Feb 18 01:03:06 crc kubenswrapper[4791]: E0218 01:03:06.062923 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:03:10 crc kubenswrapper[4791]: E0218 01:03:10.063447 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.123824 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.244087 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/10fd526e-f41c-4c4a-8e15-239cd3ac37da-server-conf\") pod \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.244392 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/10fd526e-f41c-4c4a-8e15-239cd3ac37da-pod-info\") pod \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.244513 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/10fd526e-f41c-4c4a-8e15-239cd3ac37da-erlang-cookie-secret\") pod \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.244549 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/10fd526e-f41c-4c4a-8e15-239cd3ac37da-config-data\") pod \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.244646 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/10fd526e-f41c-4c4a-8e15-239cd3ac37da-rabbitmq-tls\") pod \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.244711 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/10fd526e-f41c-4c4a-8e15-239cd3ac37da-rabbitmq-erlang-cookie\") pod \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.245987 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-fa7fafa5-cc4e-44a6-ba81-4e2329b24161\") pod \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.246079 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/10fd526e-f41c-4c4a-8e15-239cd3ac37da-plugins-conf\") pod \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.246124 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mkc6z\" (UniqueName: \"kubernetes.io/projected/10fd526e-f41c-4c4a-8e15-239cd3ac37da-kube-api-access-mkc6z\") pod \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.246189 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/10fd526e-f41c-4c4a-8e15-239cd3ac37da-rabbitmq-plugins\") pod 
\"10fd526e-f41c-4c4a-8e15-239cd3ac37da\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.246231 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/10fd526e-f41c-4c4a-8e15-239cd3ac37da-rabbitmq-confd\") pod \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\" (UID: \"10fd526e-f41c-4c4a-8e15-239cd3ac37da\") " Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.252413 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/10fd526e-f41c-4c4a-8e15-239cd3ac37da-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "10fd526e-f41c-4c4a-8e15-239cd3ac37da" (UID: "10fd526e-f41c-4c4a-8e15-239cd3ac37da"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.252472 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/10fd526e-f41c-4c4a-8e15-239cd3ac37da-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "10fd526e-f41c-4c4a-8e15-239cd3ac37da" (UID: "10fd526e-f41c-4c4a-8e15-239cd3ac37da"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.254035 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/10fd526e-f41c-4c4a-8e15-239cd3ac37da-pod-info" (OuterVolumeSpecName: "pod-info") pod "10fd526e-f41c-4c4a-8e15-239cd3ac37da" (UID: "10fd526e-f41c-4c4a-8e15-239cd3ac37da"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.255319 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10fd526e-f41c-4c4a-8e15-239cd3ac37da-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "10fd526e-f41c-4c4a-8e15-239cd3ac37da" (UID: "10fd526e-f41c-4c4a-8e15-239cd3ac37da"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.266474 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10fd526e-f41c-4c4a-8e15-239cd3ac37da-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "10fd526e-f41c-4c4a-8e15-239cd3ac37da" (UID: "10fd526e-f41c-4c4a-8e15-239cd3ac37da"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.266650 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/10fd526e-f41c-4c4a-8e15-239cd3ac37da-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "10fd526e-f41c-4c4a-8e15-239cd3ac37da" (UID: "10fd526e-f41c-4c4a-8e15-239cd3ac37da"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.267702 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10fd526e-f41c-4c4a-8e15-239cd3ac37da-kube-api-access-mkc6z" (OuterVolumeSpecName: "kube-api-access-mkc6z") pod "10fd526e-f41c-4c4a-8e15-239cd3ac37da" (UID: "10fd526e-f41c-4c4a-8e15-239cd3ac37da"). InnerVolumeSpecName "kube-api-access-mkc6z". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.283117 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/10fd526e-f41c-4c4a-8e15-239cd3ac37da-config-data" (OuterVolumeSpecName: "config-data") pod "10fd526e-f41c-4c4a-8e15-239cd3ac37da" (UID: "10fd526e-f41c-4c4a-8e15-239cd3ac37da"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.300061 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-fa7fafa5-cc4e-44a6-ba81-4e2329b24161" (OuterVolumeSpecName: "persistence") pod "10fd526e-f41c-4c4a-8e15-239cd3ac37da" (UID: "10fd526e-f41c-4c4a-8e15-239cd3ac37da"). InnerVolumeSpecName "pvc-fa7fafa5-cc4e-44a6-ba81-4e2329b24161". PluginName "kubernetes.io/csi", VolumeGidValue "" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.327504 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/10fd526e-f41c-4c4a-8e15-239cd3ac37da-server-conf" (OuterVolumeSpecName: "server-conf") pod "10fd526e-f41c-4c4a-8e15-239cd3ac37da" (UID: "10fd526e-f41c-4c4a-8e15-239cd3ac37da"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.355077 4791 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/10fd526e-f41c-4c4a-8e15-239cd3ac37da-pod-info\") on node \"crc\" DevicePath \"\"" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.355120 4791 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/10fd526e-f41c-4c4a-8e15-239cd3ac37da-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.355130 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/10fd526e-f41c-4c4a-8e15-239cd3ac37da-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.355143 4791 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/10fd526e-f41c-4c4a-8e15-239cd3ac37da-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.355165 4791 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/10fd526e-f41c-4c4a-8e15-239cd3ac37da-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.355195 4791 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-fa7fafa5-cc4e-44a6-ba81-4e2329b24161\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-fa7fafa5-cc4e-44a6-ba81-4e2329b24161\") on node \"crc\" " Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.355207 4791 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/10fd526e-f41c-4c4a-8e15-239cd3ac37da-plugins-conf\") on node \"crc\" DevicePath \"\"" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.355220 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mkc6z\" (UniqueName: 
\"kubernetes.io/projected/10fd526e-f41c-4c4a-8e15-239cd3ac37da-kube-api-access-mkc6z\") on node \"crc\" DevicePath \"\"" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.355231 4791 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/10fd526e-f41c-4c4a-8e15-239cd3ac37da-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.355241 4791 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/10fd526e-f41c-4c4a-8e15-239cd3ac37da-server-conf\") on node \"crc\" DevicePath \"\"" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.375654 4791 generic.go:334] "Generic (PLEG): container finished" podID="10fd526e-f41c-4c4a-8e15-239cd3ac37da" containerID="87dbe269e6976b92cf76cf29ccc6023b76915423a9f99644c462783a776b728f" exitCode=0 Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.375697 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"10fd526e-f41c-4c4a-8e15-239cd3ac37da","Type":"ContainerDied","Data":"87dbe269e6976b92cf76cf29ccc6023b76915423a9f99644c462783a776b728f"} Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.375724 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"10fd526e-f41c-4c4a-8e15-239cd3ac37da","Type":"ContainerDied","Data":"4a1f9d9450293995e12f78d3d871e2be63f090c468a6fe6e1c99db1d9fb8b5ad"} Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.375739 4791 scope.go:117] "RemoveContainer" containerID="87dbe269e6976b92cf76cf29ccc6023b76915423a9f99644c462783a776b728f" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.375868 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.387480 4791 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.387634 4791 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-fa7fafa5-cc4e-44a6-ba81-4e2329b24161" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-fa7fafa5-cc4e-44a6-ba81-4e2329b24161") on node "crc" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.397641 4791 scope.go:117] "RemoveContainer" containerID="583912c29332d6d7363a4c367b463077382969b5fb9887301f48fb4d2083c4d4" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.402062 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10fd526e-f41c-4c4a-8e15-239cd3ac37da-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "10fd526e-f41c-4c4a-8e15-239cd3ac37da" (UID: "10fd526e-f41c-4c4a-8e15-239cd3ac37da"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.422278 4791 scope.go:117] "RemoveContainer" containerID="87dbe269e6976b92cf76cf29ccc6023b76915423a9f99644c462783a776b728f" Feb 18 01:03:10 crc kubenswrapper[4791]: E0218 01:03:10.422721 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"87dbe269e6976b92cf76cf29ccc6023b76915423a9f99644c462783a776b728f\": container with ID starting with 87dbe269e6976b92cf76cf29ccc6023b76915423a9f99644c462783a776b728f not found: ID does not exist" containerID="87dbe269e6976b92cf76cf29ccc6023b76915423a9f99644c462783a776b728f" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.422770 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"87dbe269e6976b92cf76cf29ccc6023b76915423a9f99644c462783a776b728f"} err="failed to get container status \"87dbe269e6976b92cf76cf29ccc6023b76915423a9f99644c462783a776b728f\": rpc error: code = NotFound desc = could not find container \"87dbe269e6976b92cf76cf29ccc6023b76915423a9f99644c462783a776b728f\": container with ID starting with 87dbe269e6976b92cf76cf29ccc6023b76915423a9f99644c462783a776b728f not found: ID does not exist" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.422796 4791 scope.go:117] "RemoveContainer" containerID="583912c29332d6d7363a4c367b463077382969b5fb9887301f48fb4d2083c4d4" Feb 18 01:03:10 crc kubenswrapper[4791]: E0218 01:03:10.423082 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"583912c29332d6d7363a4c367b463077382969b5fb9887301f48fb4d2083c4d4\": container with ID starting with 583912c29332d6d7363a4c367b463077382969b5fb9887301f48fb4d2083c4d4 not found: ID does not exist" containerID="583912c29332d6d7363a4c367b463077382969b5fb9887301f48fb4d2083c4d4" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.423121 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"583912c29332d6d7363a4c367b463077382969b5fb9887301f48fb4d2083c4d4"} err="failed to get container status \"583912c29332d6d7363a4c367b463077382969b5fb9887301f48fb4d2083c4d4\": rpc error: code = NotFound desc = could not find container \"583912c29332d6d7363a4c367b463077382969b5fb9887301f48fb4d2083c4d4\": container with ID starting with 583912c29332d6d7363a4c367b463077382969b5fb9887301f48fb4d2083c4d4 not found: ID does not exist" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.457391 4791 reconciler_common.go:293] "Volume detached for volume \"pvc-fa7fafa5-cc4e-44a6-ba81-4e2329b24161\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-fa7fafa5-cc4e-44a6-ba81-4e2329b24161\") on node \"crc\" DevicePath \"\"" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.457425 4791 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/10fd526e-f41c-4c4a-8e15-239cd3ac37da-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.714691 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.733520 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.801664 4791 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/rabbitmq-server-0"] Feb 18 01:03:10 crc kubenswrapper[4791]: E0218 01:03:10.806967 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10fd526e-f41c-4c4a-8e15-239cd3ac37da" containerName="setup-container" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.807012 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="10fd526e-f41c-4c4a-8e15-239cd3ac37da" containerName="setup-container" Feb 18 01:03:10 crc kubenswrapper[4791]: E0218 01:03:10.807065 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc102119-c06f-434e-af4d-c44b920538e5" containerName="extract-utilities" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.807072 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc102119-c06f-434e-af4d-c44b920538e5" containerName="extract-utilities" Feb 18 01:03:10 crc kubenswrapper[4791]: E0218 01:03:10.807095 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc102119-c06f-434e-af4d-c44b920538e5" containerName="registry-server" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.807101 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc102119-c06f-434e-af4d-c44b920538e5" containerName="registry-server" Feb 18 01:03:10 crc kubenswrapper[4791]: E0218 01:03:10.807131 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc102119-c06f-434e-af4d-c44b920538e5" containerName="extract-content" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.807137 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc102119-c06f-434e-af4d-c44b920538e5" containerName="extract-content" Feb 18 01:03:10 crc kubenswrapper[4791]: E0218 01:03:10.807252 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10fd526e-f41c-4c4a-8e15-239cd3ac37da" containerName="rabbitmq" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.807261 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="10fd526e-f41c-4c4a-8e15-239cd3ac37da" containerName="rabbitmq" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.807729 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc102119-c06f-434e-af4d-c44b920538e5" containerName="registry-server" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.807765 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="10fd526e-f41c-4c4a-8e15-239cd3ac37da" containerName="rabbitmq" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.815557 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.819251 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.981406 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/516b6fcb-95c5-4c07-80b8-e1904970035b-config-data\") pod \"rabbitmq-server-0\" (UID: \"516b6fcb-95c5-4c07-80b8-e1904970035b\") " pod="openstack/rabbitmq-server-0" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.981477 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/516b6fcb-95c5-4c07-80b8-e1904970035b-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"516b6fcb-95c5-4c07-80b8-e1904970035b\") " pod="openstack/rabbitmq-server-0" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.981528 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-fa7fafa5-cc4e-44a6-ba81-4e2329b24161\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-fa7fafa5-cc4e-44a6-ba81-4e2329b24161\") pod \"rabbitmq-server-0\" (UID: \"516b6fcb-95c5-4c07-80b8-e1904970035b\") " pod="openstack/rabbitmq-server-0" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.981563 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/516b6fcb-95c5-4c07-80b8-e1904970035b-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"516b6fcb-95c5-4c07-80b8-e1904970035b\") " pod="openstack/rabbitmq-server-0" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.981583 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ld9jf\" (UniqueName: \"kubernetes.io/projected/516b6fcb-95c5-4c07-80b8-e1904970035b-kube-api-access-ld9jf\") pod \"rabbitmq-server-0\" (UID: \"516b6fcb-95c5-4c07-80b8-e1904970035b\") " pod="openstack/rabbitmq-server-0" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.981609 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/516b6fcb-95c5-4c07-80b8-e1904970035b-server-conf\") pod \"rabbitmq-server-0\" (UID: \"516b6fcb-95c5-4c07-80b8-e1904970035b\") " pod="openstack/rabbitmq-server-0" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.981658 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/516b6fcb-95c5-4c07-80b8-e1904970035b-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"516b6fcb-95c5-4c07-80b8-e1904970035b\") " pod="openstack/rabbitmq-server-0" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.981678 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/516b6fcb-95c5-4c07-80b8-e1904970035b-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"516b6fcb-95c5-4c07-80b8-e1904970035b\") " pod="openstack/rabbitmq-server-0" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.981699 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" 
(UniqueName: \"kubernetes.io/downward-api/516b6fcb-95c5-4c07-80b8-e1904970035b-pod-info\") pod \"rabbitmq-server-0\" (UID: \"516b6fcb-95c5-4c07-80b8-e1904970035b\") " pod="openstack/rabbitmq-server-0" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.981721 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/516b6fcb-95c5-4c07-80b8-e1904970035b-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"516b6fcb-95c5-4c07-80b8-e1904970035b\") " pod="openstack/rabbitmq-server-0" Feb 18 01:03:10 crc kubenswrapper[4791]: I0218 01:03:10.981751 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/516b6fcb-95c5-4c07-80b8-e1904970035b-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"516b6fcb-95c5-4c07-80b8-e1904970035b\") " pod="openstack/rabbitmq-server-0" Feb 18 01:03:11 crc kubenswrapper[4791]: I0218 01:03:11.075011 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10fd526e-f41c-4c4a-8e15-239cd3ac37da" path="/var/lib/kubelet/pods/10fd526e-f41c-4c4a-8e15-239cd3ac37da/volumes" Feb 18 01:03:11 crc kubenswrapper[4791]: I0218 01:03:11.083796 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-fa7fafa5-cc4e-44a6-ba81-4e2329b24161\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-fa7fafa5-cc4e-44a6-ba81-4e2329b24161\") pod \"rabbitmq-server-0\" (UID: \"516b6fcb-95c5-4c07-80b8-e1904970035b\") " pod="openstack/rabbitmq-server-0" Feb 18 01:03:11 crc kubenswrapper[4791]: I0218 01:03:11.083882 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/516b6fcb-95c5-4c07-80b8-e1904970035b-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"516b6fcb-95c5-4c07-80b8-e1904970035b\") " pod="openstack/rabbitmq-server-0" Feb 18 01:03:11 crc kubenswrapper[4791]: I0218 01:03:11.083914 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ld9jf\" (UniqueName: \"kubernetes.io/projected/516b6fcb-95c5-4c07-80b8-e1904970035b-kube-api-access-ld9jf\") pod \"rabbitmq-server-0\" (UID: \"516b6fcb-95c5-4c07-80b8-e1904970035b\") " pod="openstack/rabbitmq-server-0" Feb 18 01:03:11 crc kubenswrapper[4791]: I0218 01:03:11.083952 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/516b6fcb-95c5-4c07-80b8-e1904970035b-server-conf\") pod \"rabbitmq-server-0\" (UID: \"516b6fcb-95c5-4c07-80b8-e1904970035b\") " pod="openstack/rabbitmq-server-0" Feb 18 01:03:11 crc kubenswrapper[4791]: I0218 01:03:11.083989 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/516b6fcb-95c5-4c07-80b8-e1904970035b-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"516b6fcb-95c5-4c07-80b8-e1904970035b\") " pod="openstack/rabbitmq-server-0" Feb 18 01:03:11 crc kubenswrapper[4791]: I0218 01:03:11.084016 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/516b6fcb-95c5-4c07-80b8-e1904970035b-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"516b6fcb-95c5-4c07-80b8-e1904970035b\") " pod="openstack/rabbitmq-server-0" Feb 18 01:03:11 crc kubenswrapper[4791]: I0218 01:03:11.084035 
4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/516b6fcb-95c5-4c07-80b8-e1904970035b-pod-info\") pod \"rabbitmq-server-0\" (UID: \"516b6fcb-95c5-4c07-80b8-e1904970035b\") " pod="openstack/rabbitmq-server-0" Feb 18 01:03:11 crc kubenswrapper[4791]: I0218 01:03:11.084057 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/516b6fcb-95c5-4c07-80b8-e1904970035b-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"516b6fcb-95c5-4c07-80b8-e1904970035b\") " pod="openstack/rabbitmq-server-0" Feb 18 01:03:11 crc kubenswrapper[4791]: I0218 01:03:11.084093 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/516b6fcb-95c5-4c07-80b8-e1904970035b-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"516b6fcb-95c5-4c07-80b8-e1904970035b\") " pod="openstack/rabbitmq-server-0" Feb 18 01:03:11 crc kubenswrapper[4791]: I0218 01:03:11.084247 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/516b6fcb-95c5-4c07-80b8-e1904970035b-config-data\") pod \"rabbitmq-server-0\" (UID: \"516b6fcb-95c5-4c07-80b8-e1904970035b\") " pod="openstack/rabbitmq-server-0" Feb 18 01:03:11 crc kubenswrapper[4791]: I0218 01:03:11.084299 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/516b6fcb-95c5-4c07-80b8-e1904970035b-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"516b6fcb-95c5-4c07-80b8-e1904970035b\") " pod="openstack/rabbitmq-server-0" Feb 18 01:03:11 crc kubenswrapper[4791]: I0218 01:03:11.084769 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/516b6fcb-95c5-4c07-80b8-e1904970035b-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"516b6fcb-95c5-4c07-80b8-e1904970035b\") " pod="openstack/rabbitmq-server-0" Feb 18 01:03:11 crc kubenswrapper[4791]: I0218 01:03:11.084790 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/516b6fcb-95c5-4c07-80b8-e1904970035b-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"516b6fcb-95c5-4c07-80b8-e1904970035b\") " pod="openstack/rabbitmq-server-0" Feb 18 01:03:11 crc kubenswrapper[4791]: I0218 01:03:11.085698 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/516b6fcb-95c5-4c07-80b8-e1904970035b-config-data\") pod \"rabbitmq-server-0\" (UID: \"516b6fcb-95c5-4c07-80b8-e1904970035b\") " pod="openstack/rabbitmq-server-0" Feb 18 01:03:11 crc kubenswrapper[4791]: I0218 01:03:11.085799 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/516b6fcb-95c5-4c07-80b8-e1904970035b-server-conf\") pod \"rabbitmq-server-0\" (UID: \"516b6fcb-95c5-4c07-80b8-e1904970035b\") " pod="openstack/rabbitmq-server-0" Feb 18 01:03:11 crc kubenswrapper[4791]: I0218 01:03:11.086496 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/516b6fcb-95c5-4c07-80b8-e1904970035b-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"516b6fcb-95c5-4c07-80b8-e1904970035b\") " 
pod="openstack/rabbitmq-server-0" Feb 18 01:03:11 crc kubenswrapper[4791]: I0218 01:03:11.088638 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/516b6fcb-95c5-4c07-80b8-e1904970035b-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"516b6fcb-95c5-4c07-80b8-e1904970035b\") " pod="openstack/rabbitmq-server-0" Feb 18 01:03:11 crc kubenswrapper[4791]: I0218 01:03:11.088817 4791 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Feb 18 01:03:11 crc kubenswrapper[4791]: I0218 01:03:11.088843 4791 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-fa7fafa5-cc4e-44a6-ba81-4e2329b24161\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-fa7fafa5-cc4e-44a6-ba81-4e2329b24161\") pod \"rabbitmq-server-0\" (UID: \"516b6fcb-95c5-4c07-80b8-e1904970035b\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/3f899ddc76ac764fa06ebb180a6e42e627d315f5b0d60d5f18cf3a3154ff692c/globalmount\"" pod="openstack/rabbitmq-server-0" Feb 18 01:03:11 crc kubenswrapper[4791]: I0218 01:03:11.089512 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/516b6fcb-95c5-4c07-80b8-e1904970035b-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"516b6fcb-95c5-4c07-80b8-e1904970035b\") " pod="openstack/rabbitmq-server-0" Feb 18 01:03:11 crc kubenswrapper[4791]: I0218 01:03:11.090641 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/516b6fcb-95c5-4c07-80b8-e1904970035b-pod-info\") pod \"rabbitmq-server-0\" (UID: \"516b6fcb-95c5-4c07-80b8-e1904970035b\") " pod="openstack/rabbitmq-server-0" Feb 18 01:03:11 crc kubenswrapper[4791]: I0218 01:03:11.091937 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/516b6fcb-95c5-4c07-80b8-e1904970035b-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"516b6fcb-95c5-4c07-80b8-e1904970035b\") " pod="openstack/rabbitmq-server-0" Feb 18 01:03:11 crc kubenswrapper[4791]: I0218 01:03:11.104076 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ld9jf\" (UniqueName: \"kubernetes.io/projected/516b6fcb-95c5-4c07-80b8-e1904970035b-kube-api-access-ld9jf\") pod \"rabbitmq-server-0\" (UID: \"516b6fcb-95c5-4c07-80b8-e1904970035b\") " pod="openstack/rabbitmq-server-0" Feb 18 01:03:11 crc kubenswrapper[4791]: I0218 01:03:11.162602 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-fa7fafa5-cc4e-44a6-ba81-4e2329b24161\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-fa7fafa5-cc4e-44a6-ba81-4e2329b24161\") pod \"rabbitmq-server-0\" (UID: \"516b6fcb-95c5-4c07-80b8-e1904970035b\") " pod="openstack/rabbitmq-server-0" Feb 18 01:03:11 crc kubenswrapper[4791]: I0218 01:03:11.168477 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Feb 18 01:03:11 crc kubenswrapper[4791]: I0218 01:03:11.692634 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Feb 18 01:03:12 crc kubenswrapper[4791]: I0218 01:03:12.406944 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"516b6fcb-95c5-4c07-80b8-e1904970035b","Type":"ContainerStarted","Data":"e75151fea7746943ef76ea986ff7138bf6beeacc447ab8a1c97642c0210b78fa"} Feb 18 01:03:14 crc kubenswrapper[4791]: I0218 01:03:14.435919 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"516b6fcb-95c5-4c07-80b8-e1904970035b","Type":"ContainerStarted","Data":"f0014026b79d0193fde47fe6eaf4769e7104febdcae8aa430aaa298876fe79fe"} Feb 18 01:03:17 crc kubenswrapper[4791]: E0218 01:03:17.068192 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:03:20 crc kubenswrapper[4791]: I0218 01:03:20.061572 4791 scope.go:117] "RemoveContainer" containerID="96581330093c622b698c7dcf5f56174d5a68f6da90cd5ff685869e2dfabfd24a" Feb 18 01:03:20 crc kubenswrapper[4791]: E0218 01:03:20.062278 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:03:24 crc kubenswrapper[4791]: E0218 01:03:24.064914 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:03:28 crc kubenswrapper[4791]: I0218 01:03:28.985884 4791 scope.go:117] "RemoveContainer" containerID="836d31d22349a128a7ff7e08ea60573c4c2aaa743d22abef5271a32bde71957a" Feb 18 01:03:29 crc kubenswrapper[4791]: I0218 01:03:29.015358 4791 scope.go:117] "RemoveContainer" containerID="b70cea53289af40be549b0539d24b881d72ecd7bda3b4c271eb10c4ba2763f03" Feb 18 01:03:29 crc kubenswrapper[4791]: I0218 01:03:29.058244 4791 scope.go:117] "RemoveContainer" containerID="9329556c92ed5aba8950fdfd59f03322f7fe49cb2846b2d4821c4886e470bbdc" Feb 18 01:03:29 crc kubenswrapper[4791]: I0218 01:03:29.098379 4791 scope.go:117] "RemoveContainer" containerID="c46bbf3c6e216e41d39c408c458c51b28e0e4d172a02e1c7b1fe4d777a6ed942" Feb 18 01:03:29 crc kubenswrapper[4791]: E0218 01:03:29.111448 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:03:29 crc kubenswrapper[4791]: I0218 01:03:29.133705 4791 scope.go:117] "RemoveContainer" 
containerID="639e940835e4551da6a4c6a12bf4308ae41111a0d11b66916ddbbe88b718402b" Feb 18 01:03:29 crc kubenswrapper[4791]: I0218 01:03:29.167300 4791 scope.go:117] "RemoveContainer" containerID="a2fb9d3f2721062df8346d68d8ee9af9d4ba452dfc9e2048ab3e38c7a86ecb4b" Feb 18 01:03:29 crc kubenswrapper[4791]: I0218 01:03:29.189607 4791 scope.go:117] "RemoveContainer" containerID="2ad408151a4680c068a341aaaf8055b47f2bfe9f3d2495c19353d4be7a9abdc1" Feb 18 01:03:29 crc kubenswrapper[4791]: I0218 01:03:29.233842 4791 scope.go:117] "RemoveContainer" containerID="d384decf40d59f4ba2dec8e75acfc449cf8a5c5dd621dd567534d6975d54d198" Feb 18 01:03:29 crc kubenswrapper[4791]: I0218 01:03:29.291177 4791 scope.go:117] "RemoveContainer" containerID="90ed4e8dc4679ea6fcf919bb31e3b7a83c8aa65e8912ee66ffd34e54dc37a365" Feb 18 01:03:34 crc kubenswrapper[4791]: I0218 01:03:34.062892 4791 scope.go:117] "RemoveContainer" containerID="96581330093c622b698c7dcf5f56174d5a68f6da90cd5ff685869e2dfabfd24a" Feb 18 01:03:34 crc kubenswrapper[4791]: E0218 01:03:34.064385 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:03:36 crc kubenswrapper[4791]: E0218 01:03:36.063389 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:03:41 crc kubenswrapper[4791]: E0218 01:03:41.064402 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:03:46 crc kubenswrapper[4791]: I0218 01:03:46.902277 4791 generic.go:334] "Generic (PLEG): container finished" podID="516b6fcb-95c5-4c07-80b8-e1904970035b" containerID="f0014026b79d0193fde47fe6eaf4769e7104febdcae8aa430aaa298876fe79fe" exitCode=0 Feb 18 01:03:46 crc kubenswrapper[4791]: I0218 01:03:46.902388 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"516b6fcb-95c5-4c07-80b8-e1904970035b","Type":"ContainerDied","Data":"f0014026b79d0193fde47fe6eaf4769e7104febdcae8aa430aaa298876fe79fe"} Feb 18 01:03:47 crc kubenswrapper[4791]: E0218 01:03:47.195110 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 01:03:47 crc kubenswrapper[4791]: E0218 01:03:47.195602 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 01:03:47 crc kubenswrapper[4791]: E0218 01:03:47.195815 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lgn7d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-scphk_openstack(68e5e8d6-5771-4045-858a-4a39b2db99f9): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Feb 18 01:03:47 crc kubenswrapper[4791]: E0218 01:03:47.197093 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:03:47 crc kubenswrapper[4791]: I0218 01:03:47.915878 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"516b6fcb-95c5-4c07-80b8-e1904970035b","Type":"ContainerStarted","Data":"bb7e4f099f2f231b65dc99b922ac95b8a9cd3d4f9820cbbe0b1106f903dd5513"} Feb 18 01:03:47 crc kubenswrapper[4791]: I0218 01:03:47.916396 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Feb 18 01:03:47 crc kubenswrapper[4791]: I0218 01:03:47.952333 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=37.952316199 podStartE2EDuration="37.952316199s" podCreationTimestamp="2026-02-18 01:03:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 01:03:47.943530058 +0000 UTC m=+1769.511543228" watchObservedRunningTime="2026-02-18 01:03:47.952316199 +0000 UTC m=+1769.520329369" Feb 18 01:03:49 crc kubenswrapper[4791]: I0218 01:03:49.071178 4791 scope.go:117] "RemoveContainer" containerID="96581330093c622b698c7dcf5f56174d5a68f6da90cd5ff685869e2dfabfd24a" Feb 18 01:03:49 crc kubenswrapper[4791]: E0218 01:03:49.071956 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:03:55 crc kubenswrapper[4791]: E0218 01:03:55.065515 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:04:01 crc kubenswrapper[4791]: I0218 01:04:01.171381 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Feb 18 01:04:02 crc kubenswrapper[4791]: I0218 01:04:02.062041 4791 scope.go:117] "RemoveContainer" containerID="96581330093c622b698c7dcf5f56174d5a68f6da90cd5ff685869e2dfabfd24a" Feb 18 01:04:02 crc kubenswrapper[4791]: E0218 01:04:02.062325 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" 
podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:04:02 crc kubenswrapper[4791]: E0218 01:04:02.063069 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:04:08 crc kubenswrapper[4791]: E0218 01:04:08.064626 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:04:14 crc kubenswrapper[4791]: I0218 01:04:14.063239 4791 scope.go:117] "RemoveContainer" containerID="96581330093c622b698c7dcf5f56174d5a68f6da90cd5ff685869e2dfabfd24a" Feb 18 01:04:14 crc kubenswrapper[4791]: E0218 01:04:14.064084 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:04:17 crc kubenswrapper[4791]: E0218 01:04:17.066241 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:04:20 crc kubenswrapper[4791]: E0218 01:04:20.063052 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:04:29 crc kubenswrapper[4791]: I0218 01:04:29.070706 4791 scope.go:117] "RemoveContainer" containerID="96581330093c622b698c7dcf5f56174d5a68f6da90cd5ff685869e2dfabfd24a" Feb 18 01:04:29 crc kubenswrapper[4791]: E0218 01:04:29.071671 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:04:29 crc kubenswrapper[4791]: I0218 01:04:29.485295 4791 scope.go:117] "RemoveContainer" containerID="77fb6dfb5b6586dbc3650937321e102d9772e2b1393976dd61111617153d1186" Feb 18 01:04:29 crc kubenswrapper[4791]: I0218 01:04:29.514113 4791 scope.go:117] "RemoveContainer" containerID="f4c1fcdacff64d1d556094357b41cb186af1fc2cb5b33cc0e2ed8cf86303b2c4" Feb 18 01:04:29 crc kubenswrapper[4791]: I0218 01:04:29.536051 4791 scope.go:117] "RemoveContainer" containerID="440ee2c00e7d300bf8a8b18e44ec7c9726274b5db6e2405a55b4c05dd7fbc1d6" Feb 18 01:04:30 
crc kubenswrapper[4791]: E0218 01:04:30.063725 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:04:31 crc kubenswrapper[4791]: E0218 01:04:31.175339 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 01:04:31 crc kubenswrapper[4791]: E0218 01:04:31.176786 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 01:04:31 crc kubenswrapper[4791]: E0218 01:04:31.177238 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndh699h566hcch79h59h595h58bhb6h7fh594h67fh546h668h56hc8h6chd5h94h599h587hddh64bh57bhc9h68fh7dh57bh65dh64fh68dh59dq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-np7gr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(4b9cec47-aeda-40f0-b83e-46f09ce65e95): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 18 01:04:31 crc kubenswrapper[4791]: E0218 01:04:31.178576 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:04:41 crc kubenswrapper[4791]: I0218 01:04:41.063317 4791 scope.go:117] "RemoveContainer" containerID="96581330093c622b698c7dcf5f56174d5a68f6da90cd5ff685869e2dfabfd24a" Feb 18 01:04:41 crc kubenswrapper[4791]: E0218 01:04:41.064675 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:04:42 crc kubenswrapper[4791]: E0218 01:04:42.063917 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:04:43 crc kubenswrapper[4791]: E0218 01:04:43.063464 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:04:53 crc kubenswrapper[4791]: E0218 01:04:53.068113 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling 
image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:04:54 crc kubenswrapper[4791]: I0218 01:04:54.061511 4791 scope.go:117] "RemoveContainer" containerID="96581330093c622b698c7dcf5f56174d5a68f6da90cd5ff685869e2dfabfd24a" Feb 18 01:04:54 crc kubenswrapper[4791]: E0218 01:04:54.062252 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:04:55 crc kubenswrapper[4791]: E0218 01:04:55.066190 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:05:08 crc kubenswrapper[4791]: I0218 01:05:08.062048 4791 scope.go:117] "RemoveContainer" containerID="96581330093c622b698c7dcf5f56174d5a68f6da90cd5ff685869e2dfabfd24a" Feb 18 01:05:08 crc kubenswrapper[4791]: E0218 01:05:08.062883 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:05:08 crc kubenswrapper[4791]: E0218 01:05:08.065330 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:05:10 crc kubenswrapper[4791]: E0218 01:05:10.063467 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:05:13 crc kubenswrapper[4791]: I0218 01:05:13.909245 4791 generic.go:334] "Generic (PLEG): container finished" podID="21e731fb-b216-48ca-b351-c1b511ed7617" containerID="07a2b2fff4b6ff5b9d33957057f6995892b2cbc4de5cd2890a6d30a3f629c271" exitCode=0 Feb 18 01:05:13 crc kubenswrapper[4791]: I0218 01:05:13.909323 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-fgcg2" event={"ID":"21e731fb-b216-48ca-b351-c1b511ed7617","Type":"ContainerDied","Data":"07a2b2fff4b6ff5b9d33957057f6995892b2cbc4de5cd2890a6d30a3f629c271"} Feb 18 01:05:15 crc kubenswrapper[4791]: I0218 01:05:15.469904 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-fgcg2" Feb 18 01:05:15 crc kubenswrapper[4791]: I0218 01:05:15.564141 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d8tld\" (UniqueName: \"kubernetes.io/projected/21e731fb-b216-48ca-b351-c1b511ed7617-kube-api-access-d8tld\") pod \"21e731fb-b216-48ca-b351-c1b511ed7617\" (UID: \"21e731fb-b216-48ca-b351-c1b511ed7617\") " Feb 18 01:05:15 crc kubenswrapper[4791]: I0218 01:05:15.564377 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/21e731fb-b216-48ca-b351-c1b511ed7617-inventory\") pod \"21e731fb-b216-48ca-b351-c1b511ed7617\" (UID: \"21e731fb-b216-48ca-b351-c1b511ed7617\") " Feb 18 01:05:15 crc kubenswrapper[4791]: I0218 01:05:15.564511 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/21e731fb-b216-48ca-b351-c1b511ed7617-ssh-key-openstack-edpm-ipam\") pod \"21e731fb-b216-48ca-b351-c1b511ed7617\" (UID: \"21e731fb-b216-48ca-b351-c1b511ed7617\") " Feb 18 01:05:15 crc kubenswrapper[4791]: I0218 01:05:15.564591 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21e731fb-b216-48ca-b351-c1b511ed7617-bootstrap-combined-ca-bundle\") pod \"21e731fb-b216-48ca-b351-c1b511ed7617\" (UID: \"21e731fb-b216-48ca-b351-c1b511ed7617\") " Feb 18 01:05:15 crc kubenswrapper[4791]: I0218 01:05:15.572909 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/21e731fb-b216-48ca-b351-c1b511ed7617-kube-api-access-d8tld" (OuterVolumeSpecName: "kube-api-access-d8tld") pod "21e731fb-b216-48ca-b351-c1b511ed7617" (UID: "21e731fb-b216-48ca-b351-c1b511ed7617"). InnerVolumeSpecName "kube-api-access-d8tld". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:05:15 crc kubenswrapper[4791]: I0218 01:05:15.574073 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/21e731fb-b216-48ca-b351-c1b511ed7617-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "21e731fb-b216-48ca-b351-c1b511ed7617" (UID: "21e731fb-b216-48ca-b351-c1b511ed7617"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:05:15 crc kubenswrapper[4791]: I0218 01:05:15.601226 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/21e731fb-b216-48ca-b351-c1b511ed7617-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "21e731fb-b216-48ca-b351-c1b511ed7617" (UID: "21e731fb-b216-48ca-b351-c1b511ed7617"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:05:15 crc kubenswrapper[4791]: I0218 01:05:15.630461 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/21e731fb-b216-48ca-b351-c1b511ed7617-inventory" (OuterVolumeSpecName: "inventory") pod "21e731fb-b216-48ca-b351-c1b511ed7617" (UID: "21e731fb-b216-48ca-b351-c1b511ed7617"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:05:15 crc kubenswrapper[4791]: I0218 01:05:15.669325 4791 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/21e731fb-b216-48ca-b351-c1b511ed7617-inventory\") on node \"crc\" DevicePath \"\"" Feb 18 01:05:15 crc kubenswrapper[4791]: I0218 01:05:15.669408 4791 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/21e731fb-b216-48ca-b351-c1b511ed7617-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Feb 18 01:05:15 crc kubenswrapper[4791]: I0218 01:05:15.669429 4791 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21e731fb-b216-48ca-b351-c1b511ed7617-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 01:05:15 crc kubenswrapper[4791]: I0218 01:05:15.669477 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d8tld\" (UniqueName: \"kubernetes.io/projected/21e731fb-b216-48ca-b351-c1b511ed7617-kube-api-access-d8tld\") on node \"crc\" DevicePath \"\"" Feb 18 01:05:15 crc kubenswrapper[4791]: I0218 01:05:15.938490 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-fgcg2" event={"ID":"21e731fb-b216-48ca-b351-c1b511ed7617","Type":"ContainerDied","Data":"233937256b1ea23b8d091a6e65ef714975209f3552ca4a37227218cc085ba56f"} Feb 18 01:05:15 crc kubenswrapper[4791]: I0218 01:05:15.938531 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-fgcg2" Feb 18 01:05:15 crc kubenswrapper[4791]: I0218 01:05:15.938541 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="233937256b1ea23b8d091a6e65ef714975209f3552ca4a37227218cc085ba56f" Feb 18 01:05:16 crc kubenswrapper[4791]: I0218 01:05:16.032978 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-55w62"] Feb 18 01:05:16 crc kubenswrapper[4791]: E0218 01:05:16.033707 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="21e731fb-b216-48ca-b351-c1b511ed7617" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Feb 18 01:05:16 crc kubenswrapper[4791]: I0218 01:05:16.033735 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="21e731fb-b216-48ca-b351-c1b511ed7617" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Feb 18 01:05:16 crc kubenswrapper[4791]: I0218 01:05:16.034984 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="21e731fb-b216-48ca-b351-c1b511ed7617" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Feb 18 01:05:16 crc kubenswrapper[4791]: I0218 01:05:16.036314 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-55w62" Feb 18 01:05:16 crc kubenswrapper[4791]: I0218 01:05:16.055792 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Feb 18 01:05:16 crc kubenswrapper[4791]: I0218 01:05:16.055890 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Feb 18 01:05:16 crc kubenswrapper[4791]: I0218 01:05:16.056103 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Feb 18 01:05:16 crc kubenswrapper[4791]: I0218 01:05:16.056318 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-qjdk4" Feb 18 01:05:16 crc kubenswrapper[4791]: I0218 01:05:16.080115 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-55w62"] Feb 18 01:05:16 crc kubenswrapper[4791]: I0218 01:05:16.081334 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/6fbf1afd-08c9-4fb1-87b9-816b3846145b-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-55w62\" (UID: \"6fbf1afd-08c9-4fb1-87b9-816b3846145b\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-55w62" Feb 18 01:05:16 crc kubenswrapper[4791]: I0218 01:05:16.081378 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z6hqw\" (UniqueName: \"kubernetes.io/projected/6fbf1afd-08c9-4fb1-87b9-816b3846145b-kube-api-access-z6hqw\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-55w62\" (UID: \"6fbf1afd-08c9-4fb1-87b9-816b3846145b\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-55w62" Feb 18 01:05:16 crc kubenswrapper[4791]: I0218 01:05:16.081426 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6fbf1afd-08c9-4fb1-87b9-816b3846145b-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-55w62\" (UID: \"6fbf1afd-08c9-4fb1-87b9-816b3846145b\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-55w62" Feb 18 01:05:16 crc kubenswrapper[4791]: I0218 01:05:16.183109 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/6fbf1afd-08c9-4fb1-87b9-816b3846145b-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-55w62\" (UID: \"6fbf1afd-08c9-4fb1-87b9-816b3846145b\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-55w62" Feb 18 01:05:16 crc kubenswrapper[4791]: I0218 01:05:16.183382 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z6hqw\" (UniqueName: \"kubernetes.io/projected/6fbf1afd-08c9-4fb1-87b9-816b3846145b-kube-api-access-z6hqw\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-55w62\" (UID: \"6fbf1afd-08c9-4fb1-87b9-816b3846145b\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-55w62" Feb 18 01:05:16 crc kubenswrapper[4791]: I0218 01:05:16.183507 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/6fbf1afd-08c9-4fb1-87b9-816b3846145b-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-55w62\" (UID: \"6fbf1afd-08c9-4fb1-87b9-816b3846145b\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-55w62" Feb 18 01:05:16 crc kubenswrapper[4791]: I0218 01:05:16.186584 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6fbf1afd-08c9-4fb1-87b9-816b3846145b-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-55w62\" (UID: \"6fbf1afd-08c9-4fb1-87b9-816b3846145b\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-55w62" Feb 18 01:05:16 crc kubenswrapper[4791]: I0218 01:05:16.191373 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/6fbf1afd-08c9-4fb1-87b9-816b3846145b-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-55w62\" (UID: \"6fbf1afd-08c9-4fb1-87b9-816b3846145b\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-55w62" Feb 18 01:05:16 crc kubenswrapper[4791]: I0218 01:05:16.199802 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z6hqw\" (UniqueName: \"kubernetes.io/projected/6fbf1afd-08c9-4fb1-87b9-816b3846145b-kube-api-access-z6hqw\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-55w62\" (UID: \"6fbf1afd-08c9-4fb1-87b9-816b3846145b\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-55w62" Feb 18 01:05:16 crc kubenswrapper[4791]: I0218 01:05:16.380713 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-55w62" Feb 18 01:05:16 crc kubenswrapper[4791]: I0218 01:05:16.918307 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-55w62"] Feb 18 01:05:16 crc kubenswrapper[4791]: I0218 01:05:16.950073 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-55w62" event={"ID":"6fbf1afd-08c9-4fb1-87b9-816b3846145b","Type":"ContainerStarted","Data":"5831b9baafdedccec884acd39eb40a1624424d42f8c4125703783da3f63f37e0"} Feb 18 01:05:17 crc kubenswrapper[4791]: I0218 01:05:17.964118 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-55w62" event={"ID":"6fbf1afd-08c9-4fb1-87b9-816b3846145b","Type":"ContainerStarted","Data":"6097a7048becbff785028b078c71413b1f6be37139420083d44a0b29843a717d"} Feb 18 01:05:17 crc kubenswrapper[4791]: I0218 01:05:17.993802 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-55w62" podStartSLOduration=1.469108242 podStartE2EDuration="1.99377979s" podCreationTimestamp="2026-02-18 01:05:16 +0000 UTC" firstStartedPulling="2026-02-18 01:05:16.909779537 +0000 UTC m=+1858.477792707" lastFinishedPulling="2026-02-18 01:05:17.434451085 +0000 UTC m=+1859.002464255" observedRunningTime="2026-02-18 01:05:17.980175851 +0000 UTC m=+1859.548189041" watchObservedRunningTime="2026-02-18 01:05:17.99377979 +0000 UTC m=+1859.561792960" Feb 18 01:05:21 crc kubenswrapper[4791]: E0218 01:05:21.064681 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: 
\"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:05:23 crc kubenswrapper[4791]: I0218 01:05:23.061360 4791 scope.go:117] "RemoveContainer" containerID="96581330093c622b698c7dcf5f56174d5a68f6da90cd5ff685869e2dfabfd24a" Feb 18 01:05:23 crc kubenswrapper[4791]: E0218 01:05:23.062050 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:05:25 crc kubenswrapper[4791]: E0218 01:05:25.063783 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:05:29 crc kubenswrapper[4791]: I0218 01:05:29.661238 4791 scope.go:117] "RemoveContainer" containerID="4281bd153240a2db11bd85e303e0a03654d5bb8c72fce9e354d1e6e9ba248139" Feb 18 01:05:29 crc kubenswrapper[4791]: I0218 01:05:29.687921 4791 scope.go:117] "RemoveContainer" containerID="9d40c13311df164d949c3c135d4cb2d915e261aabafc59a3f3469bd6185abadb" Feb 18 01:05:29 crc kubenswrapper[4791]: I0218 01:05:29.725421 4791 scope.go:117] "RemoveContainer" containerID="3cb45baf8135e12ca924626a7a191c3c2463700ec1ede1092efa6f63dc0208fd" Feb 18 01:05:34 crc kubenswrapper[4791]: E0218 01:05:34.063705 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:05:34 crc kubenswrapper[4791]: I0218 01:05:34.071131 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-a83e-account-create-update-zjqhp"] Feb 18 01:05:34 crc kubenswrapper[4791]: I0218 01:05:34.086794 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-a83e-account-create-update-zjqhp"] Feb 18 01:05:35 crc kubenswrapper[4791]: I0218 01:05:35.029516 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-qd8vw"] Feb 18 01:05:35 crc kubenswrapper[4791]: I0218 01:05:35.041172 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-qd8vw"] Feb 18 01:05:35 crc kubenswrapper[4791]: I0218 01:05:35.076448 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="075c0f15-e53e-4fb6-a7d4-eccf3a44cdf3" path="/var/lib/kubelet/pods/075c0f15-e53e-4fb6-a7d4-eccf3a44cdf3/volumes" Feb 18 01:05:35 crc kubenswrapper[4791]: I0218 01:05:35.077857 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c85e1e81-d34b-45e3-9b7a-f73c712f6ad8" path="/var/lib/kubelet/pods/c85e1e81-d34b-45e3-9b7a-f73c712f6ad8/volumes" Feb 18 01:05:36 crc kubenswrapper[4791]: E0218 01:05:36.064689 4791 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:05:37 crc kubenswrapper[4791]: I0218 01:05:37.062373 4791 scope.go:117] "RemoveContainer" containerID="96581330093c622b698c7dcf5f56174d5a68f6da90cd5ff685869e2dfabfd24a" Feb 18 01:05:37 crc kubenswrapper[4791]: E0218 01:05:37.065011 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:05:40 crc kubenswrapper[4791]: I0218 01:05:40.050546 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-nw6ql"] Feb 18 01:05:40 crc kubenswrapper[4791]: I0218 01:05:40.113958 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-nw6ql"] Feb 18 01:05:41 crc kubenswrapper[4791]: I0218 01:05:41.027864 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-cae6-account-create-update-jthwt"] Feb 18 01:05:41 crc kubenswrapper[4791]: I0218 01:05:41.040620 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-cae6-account-create-update-jthwt"] Feb 18 01:05:41 crc kubenswrapper[4791]: I0218 01:05:41.072180 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c66a47b-ef25-4487-b69c-1a7c7454631d" path="/var/lib/kubelet/pods/3c66a47b-ef25-4487-b69c-1a7c7454631d/volumes" Feb 18 01:05:41 crc kubenswrapper[4791]: I0218 01:05:41.073479 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba41ccfb-9ea8-47e2-a814-d958e7ced77f" path="/var/lib/kubelet/pods/ba41ccfb-9ea8-47e2-a814-d958e7ced77f/volumes" Feb 18 01:05:43 crc kubenswrapper[4791]: I0218 01:05:43.046027 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-5db2-account-create-update-kbtp7"] Feb 18 01:05:43 crc kubenswrapper[4791]: I0218 01:05:43.058181 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-9mfr2"] Feb 18 01:05:43 crc kubenswrapper[4791]: I0218 01:05:43.079603 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-9mfr2"] Feb 18 01:05:43 crc kubenswrapper[4791]: I0218 01:05:43.088605 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-5db2-account-create-update-kbtp7"] Feb 18 01:05:45 crc kubenswrapper[4791]: I0218 01:05:45.073059 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6a71fec1-6980-4269-88ef-bb233a4f35a4" path="/var/lib/kubelet/pods/6a71fec1-6980-4269-88ef-bb233a4f35a4/volumes" Feb 18 01:05:45 crc kubenswrapper[4791]: I0218 01:05:45.074102 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c7641efb-f2fc-47fe-806d-074f2de15773" path="/var/lib/kubelet/pods/c7641efb-f2fc-47fe-806d-074f2de15773/volumes" Feb 18 01:05:47 crc kubenswrapper[4791]: I0218 01:05:47.047072 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bd9d-account-create-update-6nfrs"] Feb 18 01:05:47 crc 
kubenswrapper[4791]: I0218 01:05:47.082097 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-x8dzl"] Feb 18 01:05:47 crc kubenswrapper[4791]: I0218 01:05:47.083023 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-5kfk7"] Feb 18 01:05:47 crc kubenswrapper[4791]: I0218 01:05:47.096867 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-e552-account-create-update-lbd8g"] Feb 18 01:05:47 crc kubenswrapper[4791]: I0218 01:05:47.107380 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-5kfk7"] Feb 18 01:05:47 crc kubenswrapper[4791]: I0218 01:05:47.118082 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bd9d-account-create-update-6nfrs"] Feb 18 01:05:47 crc kubenswrapper[4791]: I0218 01:05:47.128909 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-x8dzl"] Feb 18 01:05:47 crc kubenswrapper[4791]: I0218 01:05:47.140610 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-e552-account-create-update-lbd8g"] Feb 18 01:05:49 crc kubenswrapper[4791]: E0218 01:05:49.071364 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:05:49 crc kubenswrapper[4791]: I0218 01:05:49.075541 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1568271d-f2b8-4f5a-abf8-cd9ea1e6750b" path="/var/lib/kubelet/pods/1568271d-f2b8-4f5a-abf8-cd9ea1e6750b/volumes" Feb 18 01:05:49 crc kubenswrapper[4791]: I0218 01:05:49.076395 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="307ef073-8e53-4545-9da7-88c82a038dd0" path="/var/lib/kubelet/pods/307ef073-8e53-4545-9da7-88c82a038dd0/volumes" Feb 18 01:05:49 crc kubenswrapper[4791]: I0218 01:05:49.077016 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="61227f77-9a0c-48f9-8dda-9fa75c00b71a" path="/var/lib/kubelet/pods/61227f77-9a0c-48f9-8dda-9fa75c00b71a/volumes" Feb 18 01:05:49 crc kubenswrapper[4791]: I0218 01:05:49.077645 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f40402ed-56dc-452a-9fc4-46008591a6ab" path="/var/lib/kubelet/pods/f40402ed-56dc-452a-9fc4-46008591a6ab/volumes" Feb 18 01:05:50 crc kubenswrapper[4791]: E0218 01:05:50.063665 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:05:52 crc kubenswrapper[4791]: I0218 01:05:52.061191 4791 scope.go:117] "RemoveContainer" containerID="96581330093c622b698c7dcf5f56174d5a68f6da90cd5ff685869e2dfabfd24a" Feb 18 01:05:52 crc kubenswrapper[4791]: E0218 01:05:52.061698 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:06:00 crc kubenswrapper[4791]: E0218 01:06:00.063149 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:06:03 crc kubenswrapper[4791]: E0218 01:06:03.063549 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:06:07 crc kubenswrapper[4791]: I0218 01:06:07.064644 4791 scope.go:117] "RemoveContainer" containerID="96581330093c622b698c7dcf5f56174d5a68f6da90cd5ff685869e2dfabfd24a" Feb 18 01:06:07 crc kubenswrapper[4791]: E0218 01:06:07.065848 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:06:15 crc kubenswrapper[4791]: E0218 01:06:15.063217 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:06:15 crc kubenswrapper[4791]: E0218 01:06:15.063702 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:06:15 crc kubenswrapper[4791]: I0218 01:06:15.075603 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-d604-account-create-update-jl6cf"] Feb 18 01:06:15 crc kubenswrapper[4791]: I0218 01:06:15.078689 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-ced8-account-create-update-7vkh4"] Feb 18 01:06:15 crc kubenswrapper[4791]: I0218 01:06:15.092203 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-cskgf"] Feb 18 01:06:15 crc kubenswrapper[4791]: I0218 01:06:15.102970 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-fdm5n"] Feb 18 01:06:15 crc kubenswrapper[4791]: I0218 01:06:15.112910 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-fdm5n"] Feb 18 01:06:15 crc kubenswrapper[4791]: I0218 01:06:15.123108 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-ced8-account-create-update-7vkh4"] Feb 18 01:06:15 crc kubenswrapper[4791]: I0218 01:06:15.133619 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/barbican-db-create-cskgf"] Feb 18 01:06:15 crc kubenswrapper[4791]: I0218 01:06:15.144776 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-d604-account-create-update-jl6cf"] Feb 18 01:06:15 crc kubenswrapper[4791]: I0218 01:06:15.155866 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-4vhk9"] Feb 18 01:06:15 crc kubenswrapper[4791]: I0218 01:06:15.166104 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-4vhk9"] Feb 18 01:06:15 crc kubenswrapper[4791]: I0218 01:06:15.177607 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-2992-account-create-update-lf4d8"] Feb 18 01:06:15 crc kubenswrapper[4791]: I0218 01:06:15.188688 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-create-kdm9j"] Feb 18 01:06:15 crc kubenswrapper[4791]: I0218 01:06:15.199606 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-mhld5"] Feb 18 01:06:15 crc kubenswrapper[4791]: I0218 01:06:15.211228 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-create-kdm9j"] Feb 18 01:06:15 crc kubenswrapper[4791]: I0218 01:06:15.229766 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-mhld5"] Feb 18 01:06:15 crc kubenswrapper[4791]: I0218 01:06:15.240141 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-2992-account-create-update-lf4d8"] Feb 18 01:06:15 crc kubenswrapper[4791]: I0218 01:06:15.251118 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-7843-account-create-update-95sgt"] Feb 18 01:06:15 crc kubenswrapper[4791]: I0218 01:06:15.260951 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-7843-account-create-update-95sgt"] Feb 18 01:06:17 crc kubenswrapper[4791]: I0218 01:06:17.075048 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="007e5d84-b863-42c0-88da-d72362b8f0af" path="/var/lib/kubelet/pods/007e5d84-b863-42c0-88da-d72362b8f0af/volumes" Feb 18 01:06:17 crc kubenswrapper[4791]: I0218 01:06:17.077856 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3e122e7c-a512-4608-ae3a-74e528fcfed4" path="/var/lib/kubelet/pods/3e122e7c-a512-4608-ae3a-74e528fcfed4/volumes" Feb 18 01:06:17 crc kubenswrapper[4791]: I0218 01:06:17.079663 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5227c08e-63f7-4ec3-b01f-ad54d550ce8e" path="/var/lib/kubelet/pods/5227c08e-63f7-4ec3-b01f-ad54d550ce8e/volumes" Feb 18 01:06:17 crc kubenswrapper[4791]: I0218 01:06:17.081400 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a54c4f28-a37a-446e-a455-f471bfb7b4fa" path="/var/lib/kubelet/pods/a54c4f28-a37a-446e-a455-f471bfb7b4fa/volumes" Feb 18 01:06:17 crc kubenswrapper[4791]: I0218 01:06:17.082984 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ab28bbf6-b31e-4634-9a35-1e2333b10adc" path="/var/lib/kubelet/pods/ab28bbf6-b31e-4634-9a35-1e2333b10adc/volumes" Feb 18 01:06:17 crc kubenswrapper[4791]: I0218 01:06:17.084282 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="adad799a-952a-4ae3-8c37-02707bf01576" path="/var/lib/kubelet/pods/adad799a-952a-4ae3-8c37-02707bf01576/volumes" Feb 18 01:06:17 crc kubenswrapper[4791]: I0218 01:06:17.086087 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="f27aa21f-8528-401f-b41a-090ee07af740" path="/var/lib/kubelet/pods/f27aa21f-8528-401f-b41a-090ee07af740/volumes" Feb 18 01:06:17 crc kubenswrapper[4791]: I0218 01:06:17.087700 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4551c6b-bd8f-49bd-8755-4c1262f74e04" path="/var/lib/kubelet/pods/f4551c6b-bd8f-49bd-8755-4c1262f74e04/volumes" Feb 18 01:06:17 crc kubenswrapper[4791]: I0218 01:06:17.088355 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f91b47af-6331-4d04-b4b5-9197d09fd773" path="/var/lib/kubelet/pods/f91b47af-6331-4d04-b4b5-9197d09fd773/volumes" Feb 18 01:06:20 crc kubenswrapper[4791]: I0218 01:06:20.061407 4791 scope.go:117] "RemoveContainer" containerID="96581330093c622b698c7dcf5f56174d5a68f6da90cd5ff685869e2dfabfd24a" Feb 18 01:06:20 crc kubenswrapper[4791]: E0218 01:06:20.062313 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:06:22 crc kubenswrapper[4791]: I0218 01:06:22.037679 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-9rmnf"] Feb 18 01:06:22 crc kubenswrapper[4791]: I0218 01:06:22.051808 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-wx5vg"] Feb 18 01:06:22 crc kubenswrapper[4791]: I0218 01:06:22.063418 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-9rmnf"] Feb 18 01:06:22 crc kubenswrapper[4791]: I0218 01:06:22.080891 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-wx5vg"] Feb 18 01:06:23 crc kubenswrapper[4791]: I0218 01:06:23.115765 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57936f2c-e43e-48c1-b59e-cfe1ae0f99b7" path="/var/lib/kubelet/pods/57936f2c-e43e-48c1-b59e-cfe1ae0f99b7/volumes" Feb 18 01:06:23 crc kubenswrapper[4791]: I0218 01:06:23.120056 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e48266b0-04c5-4da5-994e-516ef8e36299" path="/var/lib/kubelet/pods/e48266b0-04c5-4da5-994e-516ef8e36299/volumes" Feb 18 01:06:28 crc kubenswrapper[4791]: E0218 01:06:28.064065 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:06:29 crc kubenswrapper[4791]: I0218 01:06:29.818851 4791 scope.go:117] "RemoveContainer" containerID="8194aec0d8a4b7518de3478bfc35b8845308657e96e069ea0ccdd86302f683a1" Feb 18 01:06:29 crc kubenswrapper[4791]: I0218 01:06:29.854814 4791 scope.go:117] "RemoveContainer" containerID="c87acc8bd3df9976cb5b04770c7953204386cfd101eee67fcc4f5b2a4a08cf2d" Feb 18 01:06:29 crc kubenswrapper[4791]: I0218 01:06:29.923710 4791 scope.go:117] "RemoveContainer" containerID="1f5c8685a3c26562ce8e876c68918acf9268d83c71bd267cf21c31058d4b8d42" Feb 18 01:06:29 crc kubenswrapper[4791]: I0218 01:06:29.972556 4791 scope.go:117] "RemoveContainer" containerID="57f261c0206fe61ff589d5df0dcb9ea8dd3870246717bbcac307b2d9d1cde62f" Feb 18 
01:06:30 crc kubenswrapper[4791]: I0218 01:06:30.055662 4791 scope.go:117] "RemoveContainer" containerID="959282c7dbbc9460cf812811932dd7f23d015d63c6f808960ebb5d9e0f157d2d" Feb 18 01:06:30 crc kubenswrapper[4791]: I0218 01:06:30.063430 4791 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 18 01:06:30 crc kubenswrapper[4791]: I0218 01:06:30.111423 4791 scope.go:117] "RemoveContainer" containerID="185b5dd3d2ace79e2994d44192f4e52e6d0979939737042d25b813e126de9f31" Feb 18 01:06:30 crc kubenswrapper[4791]: I0218 01:06:30.151080 4791 scope.go:117] "RemoveContainer" containerID="2025b9012e9d047e209e35ef2efbeb64906a7db1080dcc5f50330f6594da1a61" Feb 18 01:06:30 crc kubenswrapper[4791]: E0218 01:06:30.182712 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 01:06:30 crc kubenswrapper[4791]: E0218 01:06:30.182795 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 01:06:30 crc kubenswrapper[4791]: E0218 01:06:30.182975 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lgn7d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL 
MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-scphk_openstack(68e5e8d6-5771-4045-858a-4a39b2db99f9): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 18 01:06:30 crc kubenswrapper[4791]: I0218 01:06:30.183821 4791 scope.go:117] "RemoveContainer" containerID="9c3db097a70ece3f00e20f8cd8cceaf3f98d5c1afb9298937d78e4fc3cf4d607" Feb 18 01:06:30 crc kubenswrapper[4791]: E0218 01:06:30.184805 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:06:30 crc kubenswrapper[4791]: I0218 01:06:30.217203 4791 scope.go:117] "RemoveContainer" containerID="2b619710bf1a07710c75b4fb604764fe25a6dd8f600aba6bd2159c423b54bf79" Feb 18 01:06:30 crc kubenswrapper[4791]: I0218 01:06:30.244279 4791 scope.go:117] "RemoveContainer" containerID="7e3e54cdb9589ddb430da2d5cdac3c28d586bcf838185f6955cc5d5a1913d8a2" Feb 18 01:06:30 crc kubenswrapper[4791]: I0218 01:06:30.268357 4791 scope.go:117] "RemoveContainer" containerID="48d1d8899654959c20dfc366bc088540463b1529389c36a07c0607761d9a5e48" Feb 18 01:06:30 crc kubenswrapper[4791]: I0218 01:06:30.294593 4791 scope.go:117] "RemoveContainer" containerID="d20acfdec4fa9356a4d7796a82e8d396d63642783d369df98a343d3fd73d83e0" Feb 18 01:06:30 crc kubenswrapper[4791]: I0218 01:06:30.313624 4791 scope.go:117] "RemoveContainer" containerID="6c174fdf234fd334ffe9a6f553d9b44d2b8e85e1dd65161a78424566c3ebfea2" Feb 18 01:06:30 crc kubenswrapper[4791]: I0218 01:06:30.344381 4791 scope.go:117] "RemoveContainer" containerID="04c44a90792f718817da2cdadbb0cff6d9972a8c74cf719e02d706058a86a9b3" Feb 18 01:06:30 crc kubenswrapper[4791]: I0218 01:06:30.370891 4791 scope.go:117] "RemoveContainer" containerID="15145d3564188230ea41d711877db781eb5e2dd0457d51bfb973753de894ac0b" Feb 18 01:06:30 crc kubenswrapper[4791]: I0218 01:06:30.394453 4791 scope.go:117] "RemoveContainer" containerID="e766732420e3a75e8343ebc876f61c8760fe56c6df94ac820eb3633b3c423783" Feb 18 01:06:30 crc kubenswrapper[4791]: I0218 01:06:30.420621 4791 scope.go:117] "RemoveContainer" containerID="36b634e49814e36b78a8094cc306e1645e1d190d9b2049876984a9bbdd438e42" Feb 18 01:06:30 crc kubenswrapper[4791]: I0218 01:06:30.445826 4791 scope.go:117] "RemoveContainer" containerID="ec178c6773d63da691a8609da2d805f0a70fce192b9b9372a72d1ec60c13c41e" Feb 18 01:06:30 crc kubenswrapper[4791]: I0218 01:06:30.466746 4791 scope.go:117] "RemoveContainer" 
containerID="48f24dd266741d954f6136b6dbf7292affff9b6f8cc7be3be601e1e1c0ed7021" Feb 18 01:06:30 crc kubenswrapper[4791]: I0218 01:06:30.490944 4791 scope.go:117] "RemoveContainer" containerID="0efa0e6358b07f086c2ce4083ee352f0b20c59f1d6237cab0949400ad5cd0917" Feb 18 01:06:30 crc kubenswrapper[4791]: I0218 01:06:30.535465 4791 scope.go:117] "RemoveContainer" containerID="7f2c3a747c18f59bda4800b9f93d7057a4c95b6de155a8aaee1d5b2d030effb4" Feb 18 01:06:30 crc kubenswrapper[4791]: I0218 01:06:30.564265 4791 scope.go:117] "RemoveContainer" containerID="0f45542c0ded31f1475abd3b7bc092d32bb4f794afedf5f1ff3a1f8b75d2c0ca" Feb 18 01:06:30 crc kubenswrapper[4791]: I0218 01:06:30.597080 4791 scope.go:117] "RemoveContainer" containerID="6aff4f3a75b912faf1517c2040a1ceda1b60cdb7a5139050118a4fb810dec87f" Feb 18 01:06:30 crc kubenswrapper[4791]: I0218 01:06:30.630663 4791 scope.go:117] "RemoveContainer" containerID="506bcfed16f68d1ac4430dbc9253c86aecb1a8036040ab4c28f87ce750c095c4" Feb 18 01:06:30 crc kubenswrapper[4791]: I0218 01:06:30.681845 4791 scope.go:117] "RemoveContainer" containerID="3cdd0d549303a2310267b619bffd2908656f73a39776ad6804cf3aba5a74a45b" Feb 18 01:06:35 crc kubenswrapper[4791]: I0218 01:06:35.062115 4791 scope.go:117] "RemoveContainer" containerID="96581330093c622b698c7dcf5f56174d5a68f6da90cd5ff685869e2dfabfd24a" Feb 18 01:06:35 crc kubenswrapper[4791]: E0218 01:06:35.063385 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:06:40 crc kubenswrapper[4791]: E0218 01:06:40.065236 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:06:44 crc kubenswrapper[4791]: E0218 01:06:44.063948 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:06:46 crc kubenswrapper[4791]: I0218 01:06:46.061560 4791 scope.go:117] "RemoveContainer" containerID="96581330093c622b698c7dcf5f56174d5a68f6da90cd5ff685869e2dfabfd24a" Feb 18 01:06:46 crc kubenswrapper[4791]: E0218 01:06:46.062224 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:06:54 crc kubenswrapper[4791]: I0218 01:06:54.066650 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-ghjnw"] Feb 18 01:06:54 crc kubenswrapper[4791]: I0218 01:06:54.078609 4791 kubelet.go:2431] "SyncLoop 
REMOVE" source="api" pods=["openstack/neutron-db-sync-ghjnw"] Feb 18 01:06:55 crc kubenswrapper[4791]: E0218 01:06:55.063472 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:06:55 crc kubenswrapper[4791]: E0218 01:06:55.063566 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:06:55 crc kubenswrapper[4791]: I0218 01:06:55.104147 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fca332cf-0111-45ef-b20f-726928d11b0b" path="/var/lib/kubelet/pods/fca332cf-0111-45ef-b20f-726928d11b0b/volumes" Feb 18 01:06:57 crc kubenswrapper[4791]: I0218 01:06:57.061800 4791 scope.go:117] "RemoveContainer" containerID="96581330093c622b698c7dcf5f56174d5a68f6da90cd5ff685869e2dfabfd24a" Feb 18 01:06:57 crc kubenswrapper[4791]: E0218 01:06:57.062639 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:07:04 crc kubenswrapper[4791]: I0218 01:07:04.053793 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-ld9m6"] Feb 18 01:07:04 crc kubenswrapper[4791]: I0218 01:07:04.064261 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-ld9m6"] Feb 18 01:07:05 crc kubenswrapper[4791]: I0218 01:07:05.073326 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="082acdfd-08b8-4986-8091-22d29ab897f3" path="/var/lib/kubelet/pods/082acdfd-08b8-4986-8091-22d29ab897f3/volumes" Feb 18 01:07:06 crc kubenswrapper[4791]: E0218 01:07:06.064725 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:07:10 crc kubenswrapper[4791]: I0218 01:07:10.027304 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-pjx58"] Feb 18 01:07:10 crc kubenswrapper[4791]: I0218 01:07:10.039297 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-lxpbs"] Feb 18 01:07:10 crc kubenswrapper[4791]: I0218 01:07:10.050613 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-pjx58"] Feb 18 01:07:10 crc kubenswrapper[4791]: I0218 01:07:10.060869 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-lxpbs"] Feb 18 01:07:10 crc kubenswrapper[4791]: E0218 01:07:10.063889 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:07:11 crc kubenswrapper[4791]: I0218 01:07:11.073727 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ddd4e16-7034-4925-8bba-2320640dd8b7" path="/var/lib/kubelet/pods/5ddd4e16-7034-4925-8bba-2320640dd8b7/volumes" Feb 18 01:07:11 crc kubenswrapper[4791]: I0218 01:07:11.075961 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="69958a35-9d4e-4e70-b21b-525ffef5d9da" path="/var/lib/kubelet/pods/69958a35-9d4e-4e70-b21b-525ffef5d9da/volumes" Feb 18 01:07:12 crc kubenswrapper[4791]: I0218 01:07:12.061229 4791 scope.go:117] "RemoveContainer" containerID="96581330093c622b698c7dcf5f56174d5a68f6da90cd5ff685869e2dfabfd24a" Feb 18 01:07:12 crc kubenswrapper[4791]: E0218 01:07:12.061503 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:07:19 crc kubenswrapper[4791]: E0218 01:07:19.063572 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:07:22 crc kubenswrapper[4791]: I0218 01:07:22.035284 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-k7jrj"] Feb 18 01:07:22 crc kubenswrapper[4791]: I0218 01:07:22.044844 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-k7jrj"] Feb 18 01:07:23 crc kubenswrapper[4791]: I0218 01:07:23.073870 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0" path="/var/lib/kubelet/pods/e6a3e202-5e36-45c0-9c8d-80fb4a0f58f0/volumes" Feb 18 01:07:23 crc kubenswrapper[4791]: E0218 01:07:23.164868 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 01:07:23 crc kubenswrapper[4791]: E0218 01:07:23.164932 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 01:07:23 crc kubenswrapper[4791]: E0218 01:07:23.165069 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndh699h566hcch79h59h595h58bhb6h7fh594h67fh546h668h56hc8h6chd5h94h599h587hddh64bh57bhc9h68fh7dh57bh65dh64fh68dh59dq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-np7gr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(4b9cec47-aeda-40f0-b83e-46f09ce65e95): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 18 01:07:23 crc kubenswrapper[4791]: E0218 01:07:23.166980 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:07:25 crc kubenswrapper[4791]: I0218 01:07:25.062393 4791 scope.go:117] "RemoveContainer" containerID="96581330093c622b698c7dcf5f56174d5a68f6da90cd5ff685869e2dfabfd24a" Feb 18 01:07:25 crc kubenswrapper[4791]: E0218 01:07:25.063344 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:07:31 crc kubenswrapper[4791]: I0218 01:07:31.612580 4791 scope.go:117] "RemoveContainer" containerID="b3da85a3e4cce9809241c52b63808dc7059fd6a7d9d963b4eb4608fc4c3e1399" Feb 18 01:07:31 crc kubenswrapper[4791]: I0218 01:07:31.643338 4791 scope.go:117] "RemoveContainer" containerID="d1179326184c0774c035050ce21c7a60bc5d040a13893bf2844e7608b33a0205" Feb 18 01:07:31 crc kubenswrapper[4791]: I0218 01:07:31.718177 4791 scope.go:117] "RemoveContainer" containerID="402e56035ac323209b0d643883d293c8bf2dae4d527683ce023127f2618db865" Feb 18 01:07:31 crc kubenswrapper[4791]: I0218 01:07:31.781435 4791 scope.go:117] "RemoveContainer" containerID="b5e0350017d7281404c22de7f53157415a778f82bb05ed7754c2ad736056d7e3" Feb 18 01:07:31 crc kubenswrapper[4791]: I0218 01:07:31.830889 4791 scope.go:117] "RemoveContainer" containerID="7f3f56d0fce90539d28bc79714b4b1825db0ffd6b764b7fc59289c2c1e950c2c" Feb 18 01:07:32 crc kubenswrapper[4791]: E0218 01:07:32.064575 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:07:37 crc kubenswrapper[4791]: E0218 01:07:37.063217 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:07:38 crc kubenswrapper[4791]: I0218 01:07:38.061768 4791 scope.go:117] "RemoveContainer" containerID="96581330093c622b698c7dcf5f56174d5a68f6da90cd5ff685869e2dfabfd24a" Feb 18 01:07:39 crc kubenswrapper[4791]: I0218 01:07:39.033849 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerStarted","Data":"0e5a0c0f5de93c38ae18d4c74a60d865d534e2daf4ef3ad9f6d4657064cadfe2"} Feb 18 01:07:44 crc kubenswrapper[4791]: E0218 01:07:44.064644 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:07:52 crc kubenswrapper[4791]: E0218 01:07:52.065390 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:07:59 crc kubenswrapper[4791]: E0218 01:07:59.071919 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:08:04 crc kubenswrapper[4791]: I0218 01:08:04.061895 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-3f61-account-create-update-lt497"] Feb 18 01:08:04 crc kubenswrapper[4791]: I0218 01:08:04.073112 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-3f61-account-create-update-lt497"] Feb 18 01:08:05 crc kubenswrapper[4791]: I0218 01:08:05.074844 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0411e3ff-4c35-4daa-9d29-2f8a46e5ee26" path="/var/lib/kubelet/pods/0411e3ff-4c35-4daa-9d29-2f8a46e5ee26/volumes" Feb 18 01:08:06 crc kubenswrapper[4791]: I0218 01:08:06.052824 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-2f60-account-create-update-kwdnz"] Feb 18 01:08:06 crc kubenswrapper[4791]: I0218 01:08:06.071654 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-nnnff"] Feb 18 01:08:06 crc kubenswrapper[4791]: I0218 01:08:06.105429 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-2f60-account-create-update-kwdnz"] Feb 18 01:08:06 crc kubenswrapper[4791]: E0218 01:08:06.111042 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:08:06 crc kubenswrapper[4791]: I0218 01:08:06.149307 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-bbc3-account-create-update-cgjwl"] Feb 18 01:08:06 crc kubenswrapper[4791]: I0218 01:08:06.161039 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-nnnff"] Feb 18 01:08:06 crc kubenswrapper[4791]: I0218 01:08:06.177724 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-fw4wg"] Feb 18 01:08:06 crc kubenswrapper[4791]: I0218 01:08:06.189177 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-bbc3-account-create-update-cgjwl"] Feb 18 01:08:06 crc kubenswrapper[4791]: I0218 01:08:06.200263 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-fw4wg"] Feb 18 01:08:06 crc kubenswrapper[4791]: I0218 01:08:06.212272 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-snbt9"] Feb 18 01:08:06 crc kubenswrapper[4791]: I0218 01:08:06.224857 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-snbt9"] Feb 18 01:08:07 crc kubenswrapper[4791]: I0218 01:08:07.078890 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2cbbe657-958b-4c43-a636-e04ac880613d" 
path="/var/lib/kubelet/pods/2cbbe657-958b-4c43-a636-e04ac880613d/volumes" Feb 18 01:08:07 crc kubenswrapper[4791]: I0218 01:08:07.080143 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e5c3f09-b080-4738-a696-f210249c18eb" path="/var/lib/kubelet/pods/2e5c3f09-b080-4738-a696-f210249c18eb/volumes" Feb 18 01:08:07 crc kubenswrapper[4791]: I0218 01:08:07.081060 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6944b18f-146d-47ba-9d71-06d200fa828e" path="/var/lib/kubelet/pods/6944b18f-146d-47ba-9d71-06d200fa828e/volumes" Feb 18 01:08:07 crc kubenswrapper[4791]: I0218 01:08:07.081933 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82100e2b-cadd-4a88-9599-2f0932deacce" path="/var/lib/kubelet/pods/82100e2b-cadd-4a88-9599-2f0932deacce/volumes" Feb 18 01:08:07 crc kubenswrapper[4791]: I0218 01:08:07.083472 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a90c6dfb-27a9-46ad-9cb8-121d9fb4bee8" path="/var/lib/kubelet/pods/a90c6dfb-27a9-46ad-9cb8-121d9fb4bee8/volumes" Feb 18 01:08:14 crc kubenswrapper[4791]: E0218 01:08:14.064623 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:08:19 crc kubenswrapper[4791]: E0218 01:08:19.072615 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:08:28 crc kubenswrapper[4791]: E0218 01:08:28.070412 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:08:31 crc kubenswrapper[4791]: E0218 01:08:31.066252 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:08:32 crc kubenswrapper[4791]: I0218 01:08:32.029551 4791 scope.go:117] "RemoveContainer" containerID="a897ac0c8a9a40988ec4490190b13115319b37cb8d5f74f09d618b172598cb10" Feb 18 01:08:32 crc kubenswrapper[4791]: I0218 01:08:32.062319 4791 scope.go:117] "RemoveContainer" containerID="9415a5356d9543f305cfdc305e9ba5d3b9609ec86092e334636a914b51685726" Feb 18 01:08:32 crc kubenswrapper[4791]: I0218 01:08:32.195257 4791 scope.go:117] "RemoveContainer" containerID="57eea9c360d9bf5b07bb70c8dd62acf35b6c0cad6ca783b954a64ff6ab667bd0" Feb 18 01:08:32 crc kubenswrapper[4791]: I0218 01:08:32.234581 4791 scope.go:117] "RemoveContainer" containerID="467d7b493bb118b406df39c7a96d48a7b75d575444ba161dcc63d3ce235e6204" Feb 18 01:08:32 crc kubenswrapper[4791]: I0218 01:08:32.289930 4791 scope.go:117] "RemoveContainer" 
containerID="6f0084dfc8173b974457439e3a895c9a0625785d1d98f5ad74f8aca329c3c2a8" Feb 18 01:08:32 crc kubenswrapper[4791]: I0218 01:08:32.345070 4791 scope.go:117] "RemoveContainer" containerID="d78c24646b65a67723b9ef8aaf19614c61665cd2fbc6736a8254ed38486948fa" Feb 18 01:08:38 crc kubenswrapper[4791]: I0218 01:08:38.060201 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-6jpnb"] Feb 18 01:08:38 crc kubenswrapper[4791]: I0218 01:08:38.074388 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-6jpnb"] Feb 18 01:08:39 crc kubenswrapper[4791]: I0218 01:08:39.073492 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f1d1a31c-1309-4888-8122-16d6be151b19" path="/var/lib/kubelet/pods/f1d1a31c-1309-4888-8122-16d6be151b19/volumes" Feb 18 01:08:41 crc kubenswrapper[4791]: E0218 01:08:41.064189 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:08:46 crc kubenswrapper[4791]: E0218 01:08:46.063727 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:08:56 crc kubenswrapper[4791]: E0218 01:08:56.063660 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:09:00 crc kubenswrapper[4791]: E0218 01:09:00.064644 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:09:01 crc kubenswrapper[4791]: I0218 01:09:01.052231 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-qrrmp"] Feb 18 01:09:01 crc kubenswrapper[4791]: I0218 01:09:01.077121 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-qrrmp"] Feb 18 01:09:02 crc kubenswrapper[4791]: I0218 01:09:02.093233 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-n8x6z"] Feb 18 01:09:02 crc kubenswrapper[4791]: I0218 01:09:02.149248 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-n8x6z"] Feb 18 01:09:03 crc kubenswrapper[4791]: I0218 01:09:03.075330 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8aa052d2-11d5-4497-86e2-23c4e1f72f98" path="/var/lib/kubelet/pods/8aa052d2-11d5-4497-86e2-23c4e1f72f98/volumes" Feb 18 01:09:03 crc kubenswrapper[4791]: I0218 01:09:03.076274 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d90b881e-3f26-4065-addf-b131f132fef9" 
path="/var/lib/kubelet/pods/d90b881e-3f26-4065-addf-b131f132fef9/volumes" Feb 18 01:09:05 crc kubenswrapper[4791]: I0218 01:09:05.031093 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0d1c-account-create-update-wj6st"] Feb 18 01:09:05 crc kubenswrapper[4791]: I0218 01:09:05.043337 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-0d1c-account-create-update-wj6st"] Feb 18 01:09:05 crc kubenswrapper[4791]: I0218 01:09:05.075734 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc0535df-5769-4e87-bfbd-5bec07e3dda3" path="/var/lib/kubelet/pods/cc0535df-5769-4e87-bfbd-5bec07e3dda3/volumes" Feb 18 01:09:05 crc kubenswrapper[4791]: I0218 01:09:05.255574 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-xvtzv"] Feb 18 01:09:05 crc kubenswrapper[4791]: I0218 01:09:05.258716 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xvtzv" Feb 18 01:09:05 crc kubenswrapper[4791]: I0218 01:09:05.275199 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xvtzv"] Feb 18 01:09:05 crc kubenswrapper[4791]: I0218 01:09:05.333654 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2e02c80-0648-4c25-bd84-a71d34c144f7-catalog-content\") pod \"redhat-operators-xvtzv\" (UID: \"c2e02c80-0648-4c25-bd84-a71d34c144f7\") " pod="openshift-marketplace/redhat-operators-xvtzv" Feb 18 01:09:05 crc kubenswrapper[4791]: I0218 01:09:05.333717 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2e02c80-0648-4c25-bd84-a71d34c144f7-utilities\") pod \"redhat-operators-xvtzv\" (UID: \"c2e02c80-0648-4c25-bd84-a71d34c144f7\") " pod="openshift-marketplace/redhat-operators-xvtzv" Feb 18 01:09:05 crc kubenswrapper[4791]: I0218 01:09:05.333801 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gzdkz\" (UniqueName: \"kubernetes.io/projected/c2e02c80-0648-4c25-bd84-a71d34c144f7-kube-api-access-gzdkz\") pod \"redhat-operators-xvtzv\" (UID: \"c2e02c80-0648-4c25-bd84-a71d34c144f7\") " pod="openshift-marketplace/redhat-operators-xvtzv" Feb 18 01:09:05 crc kubenswrapper[4791]: I0218 01:09:05.435878 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2e02c80-0648-4c25-bd84-a71d34c144f7-catalog-content\") pod \"redhat-operators-xvtzv\" (UID: \"c2e02c80-0648-4c25-bd84-a71d34c144f7\") " pod="openshift-marketplace/redhat-operators-xvtzv" Feb 18 01:09:05 crc kubenswrapper[4791]: I0218 01:09:05.435972 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2e02c80-0648-4c25-bd84-a71d34c144f7-utilities\") pod \"redhat-operators-xvtzv\" (UID: \"c2e02c80-0648-4c25-bd84-a71d34c144f7\") " pod="openshift-marketplace/redhat-operators-xvtzv" Feb 18 01:09:05 crc kubenswrapper[4791]: I0218 01:09:05.436076 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gzdkz\" (UniqueName: \"kubernetes.io/projected/c2e02c80-0648-4c25-bd84-a71d34c144f7-kube-api-access-gzdkz\") pod \"redhat-operators-xvtzv\" (UID: \"c2e02c80-0648-4c25-bd84-a71d34c144f7\") " 
pod="openshift-marketplace/redhat-operators-xvtzv" Feb 18 01:09:05 crc kubenswrapper[4791]: I0218 01:09:05.436761 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2e02c80-0648-4c25-bd84-a71d34c144f7-catalog-content\") pod \"redhat-operators-xvtzv\" (UID: \"c2e02c80-0648-4c25-bd84-a71d34c144f7\") " pod="openshift-marketplace/redhat-operators-xvtzv" Feb 18 01:09:05 crc kubenswrapper[4791]: I0218 01:09:05.437120 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2e02c80-0648-4c25-bd84-a71d34c144f7-utilities\") pod \"redhat-operators-xvtzv\" (UID: \"c2e02c80-0648-4c25-bd84-a71d34c144f7\") " pod="openshift-marketplace/redhat-operators-xvtzv" Feb 18 01:09:05 crc kubenswrapper[4791]: I0218 01:09:05.460217 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gzdkz\" (UniqueName: \"kubernetes.io/projected/c2e02c80-0648-4c25-bd84-a71d34c144f7-kube-api-access-gzdkz\") pod \"redhat-operators-xvtzv\" (UID: \"c2e02c80-0648-4c25-bd84-a71d34c144f7\") " pod="openshift-marketplace/redhat-operators-xvtzv" Feb 18 01:09:05 crc kubenswrapper[4791]: I0218 01:09:05.592775 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xvtzv" Feb 18 01:09:06 crc kubenswrapper[4791]: I0218 01:09:06.030759 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-create-tw6qw"] Feb 18 01:09:06 crc kubenswrapper[4791]: I0218 01:09:06.041519 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-create-tw6qw"] Feb 18 01:09:06 crc kubenswrapper[4791]: I0218 01:09:06.104223 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xvtzv"] Feb 18 01:09:06 crc kubenswrapper[4791]: I0218 01:09:06.971252 4791 generic.go:334] "Generic (PLEG): container finished" podID="c2e02c80-0648-4c25-bd84-a71d34c144f7" containerID="ff251f35fc6cacecb5ccfbc96e84ee5cc7cd2573c22e82f2c5f631dbc6f77adf" exitCode=0 Feb 18 01:09:06 crc kubenswrapper[4791]: I0218 01:09:06.971322 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xvtzv" event={"ID":"c2e02c80-0648-4c25-bd84-a71d34c144f7","Type":"ContainerDied","Data":"ff251f35fc6cacecb5ccfbc96e84ee5cc7cd2573c22e82f2c5f631dbc6f77adf"} Feb 18 01:09:06 crc kubenswrapper[4791]: I0218 01:09:06.971551 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xvtzv" event={"ID":"c2e02c80-0648-4c25-bd84-a71d34c144f7","Type":"ContainerStarted","Data":"af5690ba0d529e78be7066beab9fbe19ad8e33b0671e47da6d1e9a4d7610076a"} Feb 18 01:09:07 crc kubenswrapper[4791]: I0218 01:09:07.072715 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6fb61a28-f5c5-4c33-8706-4eb4a7160424" path="/var/lib/kubelet/pods/6fb61a28-f5c5-4c33-8706-4eb4a7160424/volumes" Feb 18 01:09:09 crc kubenswrapper[4791]: I0218 01:09:09.014339 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xvtzv" event={"ID":"c2e02c80-0648-4c25-bd84-a71d34c144f7","Type":"ContainerStarted","Data":"273fd21904e6a41bd30b34718aaf67934898475d0fac4521a95c6b848ffa714c"} Feb 18 01:09:11 crc kubenswrapper[4791]: E0218 01:09:11.064097 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling 
image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:09:11 crc kubenswrapper[4791]: E0218 01:09:11.064990 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:09:13 crc kubenswrapper[4791]: I0218 01:09:13.064028 4791 generic.go:334] "Generic (PLEG): container finished" podID="c2e02c80-0648-4c25-bd84-a71d34c144f7" containerID="273fd21904e6a41bd30b34718aaf67934898475d0fac4521a95c6b848ffa714c" exitCode=0 Feb 18 01:09:13 crc kubenswrapper[4791]: I0218 01:09:13.072034 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xvtzv" event={"ID":"c2e02c80-0648-4c25-bd84-a71d34c144f7","Type":"ContainerDied","Data":"273fd21904e6a41bd30b34718aaf67934898475d0fac4521a95c6b848ffa714c"} Feb 18 01:09:14 crc kubenswrapper[4791]: I0218 01:09:14.077568 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xvtzv" event={"ID":"c2e02c80-0648-4c25-bd84-a71d34c144f7","Type":"ContainerStarted","Data":"c2b1645dc508d040d79e63e6328f86afbc237da530124e8fe767ec86eac8f138"} Feb 18 01:09:14 crc kubenswrapper[4791]: I0218 01:09:14.097290 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-xvtzv" podStartSLOduration=2.563979271 podStartE2EDuration="9.097271803s" podCreationTimestamp="2026-02-18 01:09:05 +0000 UTC" firstStartedPulling="2026-02-18 01:09:06.973404913 +0000 UTC m=+2088.541418083" lastFinishedPulling="2026-02-18 01:09:13.506697425 +0000 UTC m=+2095.074710615" observedRunningTime="2026-02-18 01:09:14.092761154 +0000 UTC m=+2095.660774314" watchObservedRunningTime="2026-02-18 01:09:14.097271803 +0000 UTC m=+2095.665284973" Feb 18 01:09:15 crc kubenswrapper[4791]: I0218 01:09:15.593208 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-xvtzv" Feb 18 01:09:15 crc kubenswrapper[4791]: I0218 01:09:15.593292 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-xvtzv" Feb 18 01:09:16 crc kubenswrapper[4791]: I0218 01:09:16.658391 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-xvtzv" podUID="c2e02c80-0648-4c25-bd84-a71d34c144f7" containerName="registry-server" probeResult="failure" output=< Feb 18 01:09:16 crc kubenswrapper[4791]: timeout: failed to connect service ":50051" within 1s Feb 18 01:09:16 crc kubenswrapper[4791]: > Feb 18 01:09:20 crc kubenswrapper[4791]: I0218 01:09:20.037038 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-sync-7bqdn"] Feb 18 01:09:20 crc kubenswrapper[4791]: I0218 01:09:20.050577 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-sync-7bqdn"] Feb 18 01:09:21 crc kubenswrapper[4791]: I0218 01:09:21.080440 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="34c9d979-fc5d-4c7d-951c-ff13f8814802" path="/var/lib/kubelet/pods/34c9d979-fc5d-4c7d-951c-ff13f8814802/volumes" Feb 18 01:09:23 crc kubenswrapper[4791]: E0218 01:09:23.064219 4791 pod_workers.go:1301] 
"Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:09:25 crc kubenswrapper[4791]: E0218 01:09:25.064208 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:09:26 crc kubenswrapper[4791]: I0218 01:09:26.646023 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-xvtzv" podUID="c2e02c80-0648-4c25-bd84-a71d34c144f7" containerName="registry-server" probeResult="failure" output=< Feb 18 01:09:26 crc kubenswrapper[4791]: timeout: failed to connect service ":50051" within 1s Feb 18 01:09:26 crc kubenswrapper[4791]: > Feb 18 01:09:32 crc kubenswrapper[4791]: I0218 01:09:32.615349 4791 scope.go:117] "RemoveContainer" containerID="461c1e0ba7ac0aee40962224c0e5b878b45e335c985f36f6201801cd79b20d16" Feb 18 01:09:32 crc kubenswrapper[4791]: I0218 01:09:32.645744 4791 scope.go:117] "RemoveContainer" containerID="64f3d2f00c0405ca692a5efb494493436d782e5a5ff691a2062cf8b44627c753" Feb 18 01:09:32 crc kubenswrapper[4791]: I0218 01:09:32.697052 4791 scope.go:117] "RemoveContainer" containerID="f4a8df5f98b22717c8caa569d9a6844f71b4c8379fac62b6a9d0f35b3f46e0f9" Feb 18 01:09:32 crc kubenswrapper[4791]: I0218 01:09:32.759197 4791 scope.go:117] "RemoveContainer" containerID="047f05aa57fed5d29f63ce2579a8d0fdef33b00590b0538db1f3a0405b7252eb" Feb 18 01:09:32 crc kubenswrapper[4791]: I0218 01:09:32.808572 4791 scope.go:117] "RemoveContainer" containerID="722311e8e860f6d69a718e1e20ad3069f3bbf561752bd9e2e48aba6cea14564f" Feb 18 01:09:32 crc kubenswrapper[4791]: I0218 01:09:32.866222 4791 scope.go:117] "RemoveContainer" containerID="2ecf53d2211beec4940711c5b2dfa9bd19e0857d9d27dc4019b23acb88e376e2" Feb 18 01:09:34 crc kubenswrapper[4791]: E0218 01:09:34.064078 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:09:35 crc kubenswrapper[4791]: I0218 01:09:35.658969 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-xvtzv" Feb 18 01:09:35 crc kubenswrapper[4791]: I0218 01:09:35.721578 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-xvtzv" Feb 18 01:09:36 crc kubenswrapper[4791]: I0218 01:09:36.455394 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xvtzv"] Feb 18 01:09:37 crc kubenswrapper[4791]: I0218 01:09:37.345669 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-xvtzv" podUID="c2e02c80-0648-4c25-bd84-a71d34c144f7" containerName="registry-server" containerID="cri-o://c2b1645dc508d040d79e63e6328f86afbc237da530124e8fe767ec86eac8f138" gracePeriod=2 Feb 18 01:09:37 crc 
kubenswrapper[4791]: I0218 01:09:37.846342 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xvtzv" Feb 18 01:09:37 crc kubenswrapper[4791]: I0218 01:09:37.998092 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2e02c80-0648-4c25-bd84-a71d34c144f7-catalog-content\") pod \"c2e02c80-0648-4c25-bd84-a71d34c144f7\" (UID: \"c2e02c80-0648-4c25-bd84-a71d34c144f7\") " Feb 18 01:09:37 crc kubenswrapper[4791]: I0218 01:09:37.998222 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2e02c80-0648-4c25-bd84-a71d34c144f7-utilities\") pod \"c2e02c80-0648-4c25-bd84-a71d34c144f7\" (UID: \"c2e02c80-0648-4c25-bd84-a71d34c144f7\") " Feb 18 01:09:37 crc kubenswrapper[4791]: I0218 01:09:37.998266 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gzdkz\" (UniqueName: \"kubernetes.io/projected/c2e02c80-0648-4c25-bd84-a71d34c144f7-kube-api-access-gzdkz\") pod \"c2e02c80-0648-4c25-bd84-a71d34c144f7\" (UID: \"c2e02c80-0648-4c25-bd84-a71d34c144f7\") " Feb 18 01:09:37 crc kubenswrapper[4791]: I0218 01:09:37.999102 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c2e02c80-0648-4c25-bd84-a71d34c144f7-utilities" (OuterVolumeSpecName: "utilities") pod "c2e02c80-0648-4c25-bd84-a71d34c144f7" (UID: "c2e02c80-0648-4c25-bd84-a71d34c144f7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:09:38 crc kubenswrapper[4791]: I0218 01:09:38.013393 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2e02c80-0648-4c25-bd84-a71d34c144f7-kube-api-access-gzdkz" (OuterVolumeSpecName: "kube-api-access-gzdkz") pod "c2e02c80-0648-4c25-bd84-a71d34c144f7" (UID: "c2e02c80-0648-4c25-bd84-a71d34c144f7"). InnerVolumeSpecName "kube-api-access-gzdkz". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:09:38 crc kubenswrapper[4791]: I0218 01:09:38.101000 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2e02c80-0648-4c25-bd84-a71d34c144f7-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 01:09:38 crc kubenswrapper[4791]: I0218 01:09:38.101029 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gzdkz\" (UniqueName: \"kubernetes.io/projected/c2e02c80-0648-4c25-bd84-a71d34c144f7-kube-api-access-gzdkz\") on node \"crc\" DevicePath \"\"" Feb 18 01:09:38 crc kubenswrapper[4791]: I0218 01:09:38.135829 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c2e02c80-0648-4c25-bd84-a71d34c144f7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c2e02c80-0648-4c25-bd84-a71d34c144f7" (UID: "c2e02c80-0648-4c25-bd84-a71d34c144f7"). InnerVolumeSpecName "catalog-content". 
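The "Probe failed" entries at 01:09:16 and 01:09:26 for openshift-marketplace/redhat-operators-xvtzv report "timeout: failed to connect service ":50051" within 1s" until the startup probe flips to started and readiness to ready at 01:09:35. Catalog pods of this kind commonly use a gRPC health check such as grpc_health_probe -addr=:50051, but the exact probe command is not visible in this log, so the following is only a minimal TCP-level sketch of the same 1-second reachability budget, using the Python standard library:

    import socket

    def port_ready(host: str = "127.0.0.1", port: int = 50051, timeout: float = 1.0) -> bool:
        """Return True if a TCP connection to host:port succeeds within `timeout` seconds."""
        try:
            with socket.create_connection((host, port), timeout=timeout):
                return True
        except OSError:
            # Connection refused, timeout, or unreachable host, as in the probe output above.
            return False

    if __name__ == "__main__":
        # Hypothetical usage: poll until the registry-server port accepts connections.
        print("ready" if port_ready() else "not ready")

A real gRPC health probe also checks the serving status of the registry-server service, so a successful TCP connect is only a necessary, not sufficient, approximation of the "started" transition seen above.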
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:09:38 crc kubenswrapper[4791]: I0218 01:09:38.211071 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2e02c80-0648-4c25-bd84-a71d34c144f7-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 01:09:38 crc kubenswrapper[4791]: I0218 01:09:38.358286 4791 generic.go:334] "Generic (PLEG): container finished" podID="c2e02c80-0648-4c25-bd84-a71d34c144f7" containerID="c2b1645dc508d040d79e63e6328f86afbc237da530124e8fe767ec86eac8f138" exitCode=0 Feb 18 01:09:38 crc kubenswrapper[4791]: I0218 01:09:38.358316 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xvtzv" Feb 18 01:09:38 crc kubenswrapper[4791]: I0218 01:09:38.358369 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xvtzv" event={"ID":"c2e02c80-0648-4c25-bd84-a71d34c144f7","Type":"ContainerDied","Data":"c2b1645dc508d040d79e63e6328f86afbc237da530124e8fe767ec86eac8f138"} Feb 18 01:09:38 crc kubenswrapper[4791]: I0218 01:09:38.358406 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xvtzv" event={"ID":"c2e02c80-0648-4c25-bd84-a71d34c144f7","Type":"ContainerDied","Data":"af5690ba0d529e78be7066beab9fbe19ad8e33b0671e47da6d1e9a4d7610076a"} Feb 18 01:09:38 crc kubenswrapper[4791]: I0218 01:09:38.358462 4791 scope.go:117] "RemoveContainer" containerID="c2b1645dc508d040d79e63e6328f86afbc237da530124e8fe767ec86eac8f138" Feb 18 01:09:38 crc kubenswrapper[4791]: I0218 01:09:38.391104 4791 scope.go:117] "RemoveContainer" containerID="273fd21904e6a41bd30b34718aaf67934898475d0fac4521a95c6b848ffa714c" Feb 18 01:09:38 crc kubenswrapper[4791]: I0218 01:09:38.400938 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xvtzv"] Feb 18 01:09:38 crc kubenswrapper[4791]: I0218 01:09:38.417980 4791 scope.go:117] "RemoveContainer" containerID="ff251f35fc6cacecb5ccfbc96e84ee5cc7cd2573c22e82f2c5f631dbc6f77adf" Feb 18 01:09:38 crc kubenswrapper[4791]: I0218 01:09:38.421140 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-xvtzv"] Feb 18 01:09:38 crc kubenswrapper[4791]: I0218 01:09:38.467738 4791 scope.go:117] "RemoveContainer" containerID="c2b1645dc508d040d79e63e6328f86afbc237da530124e8fe767ec86eac8f138" Feb 18 01:09:38 crc kubenswrapper[4791]: E0218 01:09:38.468386 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c2b1645dc508d040d79e63e6328f86afbc237da530124e8fe767ec86eac8f138\": container with ID starting with c2b1645dc508d040d79e63e6328f86afbc237da530124e8fe767ec86eac8f138 not found: ID does not exist" containerID="c2b1645dc508d040d79e63e6328f86afbc237da530124e8fe767ec86eac8f138" Feb 18 01:09:38 crc kubenswrapper[4791]: I0218 01:09:38.468500 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c2b1645dc508d040d79e63e6328f86afbc237da530124e8fe767ec86eac8f138"} err="failed to get container status \"c2b1645dc508d040d79e63e6328f86afbc237da530124e8fe767ec86eac8f138\": rpc error: code = NotFound desc = could not find container \"c2b1645dc508d040d79e63e6328f86afbc237da530124e8fe767ec86eac8f138\": container with ID starting with c2b1645dc508d040d79e63e6328f86afbc237da530124e8fe767ec86eac8f138 not found: ID does not exist" Feb 18 01:09:38 crc 
kubenswrapper[4791]: I0218 01:09:38.468596 4791 scope.go:117] "RemoveContainer" containerID="273fd21904e6a41bd30b34718aaf67934898475d0fac4521a95c6b848ffa714c" Feb 18 01:09:38 crc kubenswrapper[4791]: E0218 01:09:38.468995 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"273fd21904e6a41bd30b34718aaf67934898475d0fac4521a95c6b848ffa714c\": container with ID starting with 273fd21904e6a41bd30b34718aaf67934898475d0fac4521a95c6b848ffa714c not found: ID does not exist" containerID="273fd21904e6a41bd30b34718aaf67934898475d0fac4521a95c6b848ffa714c" Feb 18 01:09:38 crc kubenswrapper[4791]: I0218 01:09:38.469034 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"273fd21904e6a41bd30b34718aaf67934898475d0fac4521a95c6b848ffa714c"} err="failed to get container status \"273fd21904e6a41bd30b34718aaf67934898475d0fac4521a95c6b848ffa714c\": rpc error: code = NotFound desc = could not find container \"273fd21904e6a41bd30b34718aaf67934898475d0fac4521a95c6b848ffa714c\": container with ID starting with 273fd21904e6a41bd30b34718aaf67934898475d0fac4521a95c6b848ffa714c not found: ID does not exist" Feb 18 01:09:38 crc kubenswrapper[4791]: I0218 01:09:38.469060 4791 scope.go:117] "RemoveContainer" containerID="ff251f35fc6cacecb5ccfbc96e84ee5cc7cd2573c22e82f2c5f631dbc6f77adf" Feb 18 01:09:38 crc kubenswrapper[4791]: E0218 01:09:38.469432 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff251f35fc6cacecb5ccfbc96e84ee5cc7cd2573c22e82f2c5f631dbc6f77adf\": container with ID starting with ff251f35fc6cacecb5ccfbc96e84ee5cc7cd2573c22e82f2c5f631dbc6f77adf not found: ID does not exist" containerID="ff251f35fc6cacecb5ccfbc96e84ee5cc7cd2573c22e82f2c5f631dbc6f77adf" Feb 18 01:09:38 crc kubenswrapper[4791]: I0218 01:09:38.469543 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff251f35fc6cacecb5ccfbc96e84ee5cc7cd2573c22e82f2c5f631dbc6f77adf"} err="failed to get container status \"ff251f35fc6cacecb5ccfbc96e84ee5cc7cd2573c22e82f2c5f631dbc6f77adf\": rpc error: code = NotFound desc = could not find container \"ff251f35fc6cacecb5ccfbc96e84ee5cc7cd2573c22e82f2c5f631dbc6f77adf\": container with ID starting with ff251f35fc6cacecb5ccfbc96e84ee5cc7cd2573c22e82f2c5f631dbc6f77adf not found: ID does not exist" Feb 18 01:09:39 crc kubenswrapper[4791]: I0218 01:09:39.074543 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c2e02c80-0648-4c25-bd84-a71d34c144f7" path="/var/lib/kubelet/pods/c2e02c80-0648-4c25-bd84-a71d34c144f7/volumes" Feb 18 01:09:40 crc kubenswrapper[4791]: E0218 01:09:40.087467 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:09:48 crc kubenswrapper[4791]: I0218 01:09:48.057453 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-4r4tl"] Feb 18 01:09:48 crc kubenswrapper[4791]: I0218 01:09:48.074250 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-4r4tl"] Feb 18 01:09:49 crc kubenswrapper[4791]: I0218 01:09:49.100909 4791 kubelet_volumes.go:163] "Cleaned up 
orphaned pod volumes dir" podUID="c2b1a453-7c2e-47bd-8c65-c0d6e1939741" path="/var/lib/kubelet/pods/c2b1a453-7c2e-47bd-8c65-c0d6e1939741/volumes" Feb 18 01:09:49 crc kubenswrapper[4791]: E0218 01:09:49.105253 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:09:53 crc kubenswrapper[4791]: E0218 01:09:53.064470 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:09:56 crc kubenswrapper[4791]: I0218 01:09:56.800115 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 01:09:56 crc kubenswrapper[4791]: I0218 01:09:56.800793 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 01:10:03 crc kubenswrapper[4791]: E0218 01:10:03.065429 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:10:06 crc kubenswrapper[4791]: E0218 01:10:06.063601 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:10:16 crc kubenswrapper[4791]: E0218 01:10:16.064324 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:10:18 crc kubenswrapper[4791]: E0218 01:10:18.063641 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:10:26 crc kubenswrapper[4791]: I0218 01:10:26.799992 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure 
output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 01:10:26 crc kubenswrapper[4791]: I0218 01:10:26.800516 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 01:10:29 crc kubenswrapper[4791]: E0218 01:10:29.070983 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:10:30 crc kubenswrapper[4791]: E0218 01:10:30.063420 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:10:33 crc kubenswrapper[4791]: I0218 01:10:33.090515 4791 scope.go:117] "RemoveContainer" containerID="db7096f2287b70612c4065644639e11d95c5e61ee0123c974484d99a30eb398d" Feb 18 01:10:36 crc kubenswrapper[4791]: I0218 01:10:36.711551 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-z7vk7"] Feb 18 01:10:36 crc kubenswrapper[4791]: E0218 01:10:36.712504 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2e02c80-0648-4c25-bd84-a71d34c144f7" containerName="extract-content" Feb 18 01:10:36 crc kubenswrapper[4791]: I0218 01:10:36.712516 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2e02c80-0648-4c25-bd84-a71d34c144f7" containerName="extract-content" Feb 18 01:10:36 crc kubenswrapper[4791]: E0218 01:10:36.712531 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2e02c80-0648-4c25-bd84-a71d34c144f7" containerName="registry-server" Feb 18 01:10:36 crc kubenswrapper[4791]: I0218 01:10:36.712538 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2e02c80-0648-4c25-bd84-a71d34c144f7" containerName="registry-server" Feb 18 01:10:36 crc kubenswrapper[4791]: E0218 01:10:36.712551 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2e02c80-0648-4c25-bd84-a71d34c144f7" containerName="extract-utilities" Feb 18 01:10:36 crc kubenswrapper[4791]: I0218 01:10:36.712557 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2e02c80-0648-4c25-bd84-a71d34c144f7" containerName="extract-utilities" Feb 18 01:10:36 crc kubenswrapper[4791]: I0218 01:10:36.712828 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2e02c80-0648-4c25-bd84-a71d34c144f7" containerName="registry-server" Feb 18 01:10:36 crc kubenswrapper[4791]: I0218 01:10:36.714535 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z7vk7" Feb 18 01:10:36 crc kubenswrapper[4791]: I0218 01:10:36.726303 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-z7vk7"] Feb 18 01:10:36 crc kubenswrapper[4791]: I0218 01:10:36.808514 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c-utilities\") pod \"redhat-marketplace-z7vk7\" (UID: \"fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c\") " pod="openshift-marketplace/redhat-marketplace-z7vk7" Feb 18 01:10:36 crc kubenswrapper[4791]: I0218 01:10:36.808783 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fmsqw\" (UniqueName: \"kubernetes.io/projected/fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c-kube-api-access-fmsqw\") pod \"redhat-marketplace-z7vk7\" (UID: \"fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c\") " pod="openshift-marketplace/redhat-marketplace-z7vk7" Feb 18 01:10:36 crc kubenswrapper[4791]: I0218 01:10:36.808870 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c-catalog-content\") pod \"redhat-marketplace-z7vk7\" (UID: \"fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c\") " pod="openshift-marketplace/redhat-marketplace-z7vk7" Feb 18 01:10:36 crc kubenswrapper[4791]: I0218 01:10:36.910520 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c-utilities\") pod \"redhat-marketplace-z7vk7\" (UID: \"fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c\") " pod="openshift-marketplace/redhat-marketplace-z7vk7" Feb 18 01:10:36 crc kubenswrapper[4791]: I0218 01:10:36.910699 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fmsqw\" (UniqueName: \"kubernetes.io/projected/fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c-kube-api-access-fmsqw\") pod \"redhat-marketplace-z7vk7\" (UID: \"fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c\") " pod="openshift-marketplace/redhat-marketplace-z7vk7" Feb 18 01:10:36 crc kubenswrapper[4791]: I0218 01:10:36.910730 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c-catalog-content\") pod \"redhat-marketplace-z7vk7\" (UID: \"fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c\") " pod="openshift-marketplace/redhat-marketplace-z7vk7" Feb 18 01:10:36 crc kubenswrapper[4791]: I0218 01:10:36.910972 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c-utilities\") pod \"redhat-marketplace-z7vk7\" (UID: \"fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c\") " pod="openshift-marketplace/redhat-marketplace-z7vk7" Feb 18 01:10:36 crc kubenswrapper[4791]: I0218 01:10:36.911011 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c-catalog-content\") pod \"redhat-marketplace-z7vk7\" (UID: \"fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c\") " pod="openshift-marketplace/redhat-marketplace-z7vk7" Feb 18 01:10:36 crc kubenswrapper[4791]: I0218 01:10:36.931259 4791 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-fmsqw\" (UniqueName: \"kubernetes.io/projected/fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c-kube-api-access-fmsqw\") pod \"redhat-marketplace-z7vk7\" (UID: \"fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c\") " pod="openshift-marketplace/redhat-marketplace-z7vk7" Feb 18 01:10:37 crc kubenswrapper[4791]: I0218 01:10:37.042431 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z7vk7" Feb 18 01:10:37 crc kubenswrapper[4791]: I0218 01:10:37.540847 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-z7vk7"] Feb 18 01:10:38 crc kubenswrapper[4791]: I0218 01:10:38.560311 4791 generic.go:334] "Generic (PLEG): container finished" podID="fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c" containerID="28d5125b6cc185ed36ff315a97a4009baeccf5c1bc661a0d016b7755bcd35eab" exitCode=0 Feb 18 01:10:38 crc kubenswrapper[4791]: I0218 01:10:38.560704 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z7vk7" event={"ID":"fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c","Type":"ContainerDied","Data":"28d5125b6cc185ed36ff315a97a4009baeccf5c1bc661a0d016b7755bcd35eab"} Feb 18 01:10:38 crc kubenswrapper[4791]: I0218 01:10:38.560735 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z7vk7" event={"ID":"fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c","Type":"ContainerStarted","Data":"11d832a7225598b5ee683026be45dbfbdc2c999e52ce6ad0d1d3cb0f1453bb20"} Feb 18 01:10:39 crc kubenswrapper[4791]: I0218 01:10:39.570821 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z7vk7" event={"ID":"fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c","Type":"ContainerStarted","Data":"e3e7089019de17ee5f8ef927883711757789385faaf307f66feb2667735d7c04"} Feb 18 01:10:40 crc kubenswrapper[4791]: I0218 01:10:40.583337 4791 generic.go:334] "Generic (PLEG): container finished" podID="fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c" containerID="e3e7089019de17ee5f8ef927883711757789385faaf307f66feb2667735d7c04" exitCode=0 Feb 18 01:10:40 crc kubenswrapper[4791]: I0218 01:10:40.583446 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z7vk7" event={"ID":"fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c","Type":"ContainerDied","Data":"e3e7089019de17ee5f8ef927883711757789385faaf307f66feb2667735d7c04"} Feb 18 01:10:41 crc kubenswrapper[4791]: I0218 01:10:41.594280 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z7vk7" event={"ID":"fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c","Type":"ContainerStarted","Data":"9829fc2d1f7e0a2ebd1613c9adbec7ae2f105916ca772ad7b108853aef0b09a6"} Feb 18 01:10:41 crc kubenswrapper[4791]: I0218 01:10:41.611960 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-z7vk7" podStartSLOduration=3.18823249 podStartE2EDuration="5.611943225s" podCreationTimestamp="2026-02-18 01:10:36 +0000 UTC" firstStartedPulling="2026-02-18 01:10:38.562758515 +0000 UTC m=+2180.130771675" lastFinishedPulling="2026-02-18 01:10:40.98646923 +0000 UTC m=+2182.554482410" observedRunningTime="2026-02-18 01:10:41.608731146 +0000 UTC m=+2183.176744316" watchObservedRunningTime="2026-02-18 01:10:41.611943225 +0000 UTC m=+2183.179956395" Feb 18 01:10:44 crc kubenswrapper[4791]: E0218 01:10:44.066116 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" 
for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:10:45 crc kubenswrapper[4791]: E0218 01:10:45.062687 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:10:47 crc kubenswrapper[4791]: I0218 01:10:47.043441 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-z7vk7" Feb 18 01:10:47 crc kubenswrapper[4791]: I0218 01:10:47.043698 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-z7vk7" Feb 18 01:10:47 crc kubenswrapper[4791]: I0218 01:10:47.105125 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-z7vk7" Feb 18 01:10:47 crc kubenswrapper[4791]: I0218 01:10:47.755760 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-z7vk7" Feb 18 01:10:47 crc kubenswrapper[4791]: I0218 01:10:47.836688 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-z7vk7"] Feb 18 01:10:49 crc kubenswrapper[4791]: I0218 01:10:49.679564 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-z7vk7" podUID="fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c" containerName="registry-server" containerID="cri-o://9829fc2d1f7e0a2ebd1613c9adbec7ae2f105916ca772ad7b108853aef0b09a6" gracePeriod=2 Feb 18 01:10:50 crc kubenswrapper[4791]: I0218 01:10:50.329688 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z7vk7" Feb 18 01:10:50 crc kubenswrapper[4791]: I0218 01:10:50.485712 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c-catalog-content\") pod \"fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c\" (UID: \"fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c\") " Feb 18 01:10:50 crc kubenswrapper[4791]: I0218 01:10:50.486205 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c-utilities\") pod \"fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c\" (UID: \"fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c\") " Feb 18 01:10:50 crc kubenswrapper[4791]: I0218 01:10:50.486334 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fmsqw\" (UniqueName: \"kubernetes.io/projected/fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c-kube-api-access-fmsqw\") pod \"fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c\" (UID: \"fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c\") " Feb 18 01:10:50 crc kubenswrapper[4791]: I0218 01:10:50.486951 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c-utilities" (OuterVolumeSpecName: "utilities") pod "fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c" (UID: "fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:10:50 crc kubenswrapper[4791]: I0218 01:10:50.492660 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c-kube-api-access-fmsqw" (OuterVolumeSpecName: "kube-api-access-fmsqw") pod "fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c" (UID: "fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c"). InnerVolumeSpecName "kube-api-access-fmsqw". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:10:50 crc kubenswrapper[4791]: I0218 01:10:50.510950 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c" (UID: "fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:10:50 crc kubenswrapper[4791]: I0218 01:10:50.588935 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fmsqw\" (UniqueName: \"kubernetes.io/projected/fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c-kube-api-access-fmsqw\") on node \"crc\" DevicePath \"\"" Feb 18 01:10:50 crc kubenswrapper[4791]: I0218 01:10:50.588967 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 01:10:50 crc kubenswrapper[4791]: I0218 01:10:50.588977 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 01:10:50 crc kubenswrapper[4791]: I0218 01:10:50.693504 4791 generic.go:334] "Generic (PLEG): container finished" podID="fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c" containerID="9829fc2d1f7e0a2ebd1613c9adbec7ae2f105916ca772ad7b108853aef0b09a6" exitCode=0 Feb 18 01:10:50 crc kubenswrapper[4791]: I0218 01:10:50.693558 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z7vk7" event={"ID":"fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c","Type":"ContainerDied","Data":"9829fc2d1f7e0a2ebd1613c9adbec7ae2f105916ca772ad7b108853aef0b09a6"} Feb 18 01:10:50 crc kubenswrapper[4791]: I0218 01:10:50.693591 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z7vk7" event={"ID":"fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c","Type":"ContainerDied","Data":"11d832a7225598b5ee683026be45dbfbdc2c999e52ce6ad0d1d3cb0f1453bb20"} Feb 18 01:10:50 crc kubenswrapper[4791]: I0218 01:10:50.693611 4791 scope.go:117] "RemoveContainer" containerID="9829fc2d1f7e0a2ebd1613c9adbec7ae2f105916ca772ad7b108853aef0b09a6" Feb 18 01:10:50 crc kubenswrapper[4791]: I0218 01:10:50.693810 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z7vk7" Feb 18 01:10:50 crc kubenswrapper[4791]: I0218 01:10:50.729661 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-z7vk7"] Feb 18 01:10:50 crc kubenswrapper[4791]: I0218 01:10:50.746922 4791 scope.go:117] "RemoveContainer" containerID="e3e7089019de17ee5f8ef927883711757789385faaf307f66feb2667735d7c04" Feb 18 01:10:50 crc kubenswrapper[4791]: I0218 01:10:50.751729 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-z7vk7"] Feb 18 01:10:50 crc kubenswrapper[4791]: I0218 01:10:50.769018 4791 scope.go:117] "RemoveContainer" containerID="28d5125b6cc185ed36ff315a97a4009baeccf5c1bc661a0d016b7755bcd35eab" Feb 18 01:10:50 crc kubenswrapper[4791]: I0218 01:10:50.822389 4791 scope.go:117] "RemoveContainer" containerID="9829fc2d1f7e0a2ebd1613c9adbec7ae2f105916ca772ad7b108853aef0b09a6" Feb 18 01:10:50 crc kubenswrapper[4791]: E0218 01:10:50.823531 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9829fc2d1f7e0a2ebd1613c9adbec7ae2f105916ca772ad7b108853aef0b09a6\": container with ID starting with 9829fc2d1f7e0a2ebd1613c9adbec7ae2f105916ca772ad7b108853aef0b09a6 not found: ID does not exist" containerID="9829fc2d1f7e0a2ebd1613c9adbec7ae2f105916ca772ad7b108853aef0b09a6" Feb 18 01:10:50 crc kubenswrapper[4791]: I0218 01:10:50.823570 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9829fc2d1f7e0a2ebd1613c9adbec7ae2f105916ca772ad7b108853aef0b09a6"} err="failed to get container status \"9829fc2d1f7e0a2ebd1613c9adbec7ae2f105916ca772ad7b108853aef0b09a6\": rpc error: code = NotFound desc = could not find container \"9829fc2d1f7e0a2ebd1613c9adbec7ae2f105916ca772ad7b108853aef0b09a6\": container with ID starting with 9829fc2d1f7e0a2ebd1613c9adbec7ae2f105916ca772ad7b108853aef0b09a6 not found: ID does not exist" Feb 18 01:10:50 crc kubenswrapper[4791]: I0218 01:10:50.823597 4791 scope.go:117] "RemoveContainer" containerID="e3e7089019de17ee5f8ef927883711757789385faaf307f66feb2667735d7c04" Feb 18 01:10:50 crc kubenswrapper[4791]: E0218 01:10:50.823952 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e3e7089019de17ee5f8ef927883711757789385faaf307f66feb2667735d7c04\": container with ID starting with e3e7089019de17ee5f8ef927883711757789385faaf307f66feb2667735d7c04 not found: ID does not exist" containerID="e3e7089019de17ee5f8ef927883711757789385faaf307f66feb2667735d7c04" Feb 18 01:10:50 crc kubenswrapper[4791]: I0218 01:10:50.823985 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e3e7089019de17ee5f8ef927883711757789385faaf307f66feb2667735d7c04"} err="failed to get container status \"e3e7089019de17ee5f8ef927883711757789385faaf307f66feb2667735d7c04\": rpc error: code = NotFound desc = could not find container \"e3e7089019de17ee5f8ef927883711757789385faaf307f66feb2667735d7c04\": container with ID starting with e3e7089019de17ee5f8ef927883711757789385faaf307f66feb2667735d7c04 not found: ID does not exist" Feb 18 01:10:50 crc kubenswrapper[4791]: I0218 01:10:50.824007 4791 scope.go:117] "RemoveContainer" containerID="28d5125b6cc185ed36ff315a97a4009baeccf5c1bc661a0d016b7755bcd35eab" Feb 18 01:10:50 crc kubenswrapper[4791]: E0218 01:10:50.824553 4791 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"28d5125b6cc185ed36ff315a97a4009baeccf5c1bc661a0d016b7755bcd35eab\": container with ID starting with 28d5125b6cc185ed36ff315a97a4009baeccf5c1bc661a0d016b7755bcd35eab not found: ID does not exist" containerID="28d5125b6cc185ed36ff315a97a4009baeccf5c1bc661a0d016b7755bcd35eab" Feb 18 01:10:50 crc kubenswrapper[4791]: I0218 01:10:50.824649 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28d5125b6cc185ed36ff315a97a4009baeccf5c1bc661a0d016b7755bcd35eab"} err="failed to get container status \"28d5125b6cc185ed36ff315a97a4009baeccf5c1bc661a0d016b7755bcd35eab\": rpc error: code = NotFound desc = could not find container \"28d5125b6cc185ed36ff315a97a4009baeccf5c1bc661a0d016b7755bcd35eab\": container with ID starting with 28d5125b6cc185ed36ff315a97a4009baeccf5c1bc661a0d016b7755bcd35eab not found: ID does not exist" Feb 18 01:10:51 crc kubenswrapper[4791]: I0218 01:10:51.072828 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c" path="/var/lib/kubelet/pods/fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c/volumes" Feb 18 01:10:56 crc kubenswrapper[4791]: E0218 01:10:56.063994 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:10:56 crc kubenswrapper[4791]: I0218 01:10:56.800388 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 01:10:56 crc kubenswrapper[4791]: I0218 01:10:56.800747 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 01:10:56 crc kubenswrapper[4791]: I0218 01:10:56.800809 4791 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" Feb 18 01:10:56 crc kubenswrapper[4791]: I0218 01:10:56.802096 4791 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0e5a0c0f5de93c38ae18d4c74a60d865d534e2daf4ef3ad9f6d4657064cadfe2"} pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 18 01:10:56 crc kubenswrapper[4791]: I0218 01:10:56.802223 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" containerID="cri-o://0e5a0c0f5de93c38ae18d4c74a60d865d534e2daf4ef3ad9f6d4657064cadfe2" gracePeriod=600 Feb 18 01:10:57 crc kubenswrapper[4791]: I0218 01:10:57.774962 4791 generic.go:334] "Generic (PLEG): container finished" podID="0b31a333-8f95-459c-8135-e91e557c4c85" 
containerID="0e5a0c0f5de93c38ae18d4c74a60d865d534e2daf4ef3ad9f6d4657064cadfe2" exitCode=0 Feb 18 01:10:57 crc kubenswrapper[4791]: I0218 01:10:57.775030 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerDied","Data":"0e5a0c0f5de93c38ae18d4c74a60d865d534e2daf4ef3ad9f6d4657064cadfe2"} Feb 18 01:10:57 crc kubenswrapper[4791]: I0218 01:10:57.775693 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerStarted","Data":"ab05f5302892c0c3c036416d2782c56649d1db2dfbd76a18bbb4188e41a054e1"} Feb 18 01:10:57 crc kubenswrapper[4791]: I0218 01:10:57.775718 4791 scope.go:117] "RemoveContainer" containerID="96581330093c622b698c7dcf5f56174d5a68f6da90cd5ff685869e2dfabfd24a" Feb 18 01:11:00 crc kubenswrapper[4791]: E0218 01:11:00.064450 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:11:10 crc kubenswrapper[4791]: E0218 01:11:10.064136 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:11:13 crc kubenswrapper[4791]: E0218 01:11:13.063229 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:11:24 crc kubenswrapper[4791]: E0218 01:11:24.063215 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:11:27 crc kubenswrapper[4791]: E0218 01:11:27.064521 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:11:35 crc kubenswrapper[4791]: E0218 01:11:35.062905 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:11:41 crc kubenswrapper[4791]: I0218 01:11:41.063474 4791 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 18 01:11:41 crc 
kubenswrapper[4791]: E0218 01:11:41.188541 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 01:11:41 crc kubenswrapper[4791]: E0218 01:11:41.188589 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 01:11:41 crc kubenswrapper[4791]: E0218 01:11:41.188703 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lgn7d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-scphk_openstack(68e5e8d6-5771-4045-858a-4a39b2db99f9): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Feb 18 01:11:41 crc kubenswrapper[4791]: E0218 01:11:41.189850 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:11:48 crc kubenswrapper[4791]: E0218 01:11:48.064601 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:11:52 crc kubenswrapper[4791]: E0218 01:11:52.063885 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:12:03 crc kubenswrapper[4791]: E0218 01:12:03.069583 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:12:04 crc kubenswrapper[4791]: E0218 01:12:04.065306 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:12:07 crc kubenswrapper[4791]: I0218 01:12:07.762343 4791 generic.go:334] "Generic (PLEG): container finished" podID="6fbf1afd-08c9-4fb1-87b9-816b3846145b" containerID="6097a7048becbff785028b078c71413b1f6be37139420083d44a0b29843a717d" exitCode=2 Feb 18 01:12:07 crc kubenswrapper[4791]: I0218 01:12:07.762453 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-55w62" event={"ID":"6fbf1afd-08c9-4fb1-87b9-816b3846145b","Type":"ContainerDied","Data":"6097a7048becbff785028b078c71413b1f6be37139420083d44a0b29843a717d"} Feb 18 01:12:09 crc kubenswrapper[4791]: I0218 01:12:09.291852 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-55w62" Feb 18 01:12:09 crc kubenswrapper[4791]: I0218 01:12:09.399014 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/6fbf1afd-08c9-4fb1-87b9-816b3846145b-ssh-key-openstack-edpm-ipam\") pod \"6fbf1afd-08c9-4fb1-87b9-816b3846145b\" (UID: \"6fbf1afd-08c9-4fb1-87b9-816b3846145b\") " Feb 18 01:12:09 crc kubenswrapper[4791]: I0218 01:12:09.399102 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6fbf1afd-08c9-4fb1-87b9-816b3846145b-inventory\") pod \"6fbf1afd-08c9-4fb1-87b9-816b3846145b\" (UID: \"6fbf1afd-08c9-4fb1-87b9-816b3846145b\") " Feb 18 01:12:09 crc kubenswrapper[4791]: I0218 01:12:09.399344 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z6hqw\" (UniqueName: \"kubernetes.io/projected/6fbf1afd-08c9-4fb1-87b9-816b3846145b-kube-api-access-z6hqw\") pod \"6fbf1afd-08c9-4fb1-87b9-816b3846145b\" (UID: \"6fbf1afd-08c9-4fb1-87b9-816b3846145b\") " Feb 18 01:12:09 crc kubenswrapper[4791]: I0218 01:12:09.408112 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6fbf1afd-08c9-4fb1-87b9-816b3846145b-kube-api-access-z6hqw" (OuterVolumeSpecName: "kube-api-access-z6hqw") pod "6fbf1afd-08c9-4fb1-87b9-816b3846145b" (UID: "6fbf1afd-08c9-4fb1-87b9-816b3846145b"). InnerVolumeSpecName "kube-api-access-z6hqw". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:12:09 crc kubenswrapper[4791]: I0218 01:12:09.437977 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6fbf1afd-08c9-4fb1-87b9-816b3846145b-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "6fbf1afd-08c9-4fb1-87b9-816b3846145b" (UID: "6fbf1afd-08c9-4fb1-87b9-816b3846145b"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:12:09 crc kubenswrapper[4791]: I0218 01:12:09.449650 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6fbf1afd-08c9-4fb1-87b9-816b3846145b-inventory" (OuterVolumeSpecName: "inventory") pod "6fbf1afd-08c9-4fb1-87b9-816b3846145b" (UID: "6fbf1afd-08c9-4fb1-87b9-816b3846145b"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:12:09 crc kubenswrapper[4791]: I0218 01:12:09.502038 4791 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/6fbf1afd-08c9-4fb1-87b9-816b3846145b-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Feb 18 01:12:09 crc kubenswrapper[4791]: I0218 01:12:09.502086 4791 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6fbf1afd-08c9-4fb1-87b9-816b3846145b-inventory\") on node \"crc\" DevicePath \"\"" Feb 18 01:12:09 crc kubenswrapper[4791]: I0218 01:12:09.502099 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z6hqw\" (UniqueName: \"kubernetes.io/projected/6fbf1afd-08c9-4fb1-87b9-816b3846145b-kube-api-access-z6hqw\") on node \"crc\" DevicePath \"\"" Feb 18 01:12:09 crc kubenswrapper[4791]: I0218 01:12:09.786265 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-55w62" event={"ID":"6fbf1afd-08c9-4fb1-87b9-816b3846145b","Type":"ContainerDied","Data":"5831b9baafdedccec884acd39eb40a1624424d42f8c4125703783da3f63f37e0"} Feb 18 01:12:09 crc kubenswrapper[4791]: I0218 01:12:09.786760 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5831b9baafdedccec884acd39eb40a1624424d42f8c4125703783da3f63f37e0" Feb 18 01:12:09 crc kubenswrapper[4791]: I0218 01:12:09.786465 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-55w62" Feb 18 01:12:14 crc kubenswrapper[4791]: E0218 01:12:14.065063 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:12:16 crc kubenswrapper[4791]: I0218 01:12:16.032128 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-jgpvr"] Feb 18 01:12:16 crc kubenswrapper[4791]: E0218 01:12:16.032595 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fbf1afd-08c9-4fb1-87b9-816b3846145b" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Feb 18 01:12:16 crc kubenswrapper[4791]: I0218 01:12:16.032609 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fbf1afd-08c9-4fb1-87b9-816b3846145b" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Feb 18 01:12:16 crc kubenswrapper[4791]: E0218 01:12:16.032641 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c" containerName="extract-content" Feb 18 01:12:16 crc kubenswrapper[4791]: I0218 01:12:16.032647 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c" containerName="extract-content" Feb 18 01:12:16 crc kubenswrapper[4791]: E0218 01:12:16.032671 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c" containerName="extract-utilities" Feb 18 01:12:16 crc kubenswrapper[4791]: I0218 01:12:16.032677 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c" containerName="extract-utilities" Feb 18 01:12:16 crc 
kubenswrapper[4791]: E0218 01:12:16.032693 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c" containerName="registry-server" Feb 18 01:12:16 crc kubenswrapper[4791]: I0218 01:12:16.032700 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c" containerName="registry-server" Feb 18 01:12:16 crc kubenswrapper[4791]: I0218 01:12:16.032908 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fbf1afd-08c9-4fb1-87b9-816b3846145b" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Feb 18 01:12:16 crc kubenswrapper[4791]: I0218 01:12:16.032933 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="fdfc4f1c-4a13-4dc6-9cb9-221e0cdaec0c" containerName="registry-server" Feb 18 01:12:16 crc kubenswrapper[4791]: I0218 01:12:16.033649 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-jgpvr" Feb 18 01:12:16 crc kubenswrapper[4791]: I0218 01:12:16.036699 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Feb 18 01:12:16 crc kubenswrapper[4791]: I0218 01:12:16.036944 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Feb 18 01:12:16 crc kubenswrapper[4791]: I0218 01:12:16.037084 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Feb 18 01:12:16 crc kubenswrapper[4791]: I0218 01:12:16.043629 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-qjdk4" Feb 18 01:12:16 crc kubenswrapper[4791]: I0218 01:12:16.052140 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-jgpvr"] Feb 18 01:12:16 crc kubenswrapper[4791]: I0218 01:12:16.110324 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/80cb92f6-d3a1-44dc-96de-ce408815087a-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-jgpvr\" (UID: \"80cb92f6-d3a1-44dc-96de-ce408815087a\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-jgpvr" Feb 18 01:12:16 crc kubenswrapper[4791]: I0218 01:12:16.110628 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c5hg5\" (UniqueName: \"kubernetes.io/projected/80cb92f6-d3a1-44dc-96de-ce408815087a-kube-api-access-c5hg5\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-jgpvr\" (UID: \"80cb92f6-d3a1-44dc-96de-ce408815087a\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-jgpvr" Feb 18 01:12:16 crc kubenswrapper[4791]: I0218 01:12:16.110682 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/80cb92f6-d3a1-44dc-96de-ce408815087a-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-jgpvr\" (UID: \"80cb92f6-d3a1-44dc-96de-ce408815087a\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-jgpvr" Feb 18 01:12:16 crc kubenswrapper[4791]: I0218 01:12:16.212870 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: 
\"kubernetes.io/secret/80cb92f6-d3a1-44dc-96de-ce408815087a-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-jgpvr\" (UID: \"80cb92f6-d3a1-44dc-96de-ce408815087a\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-jgpvr" Feb 18 01:12:16 crc kubenswrapper[4791]: I0218 01:12:16.212934 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c5hg5\" (UniqueName: \"kubernetes.io/projected/80cb92f6-d3a1-44dc-96de-ce408815087a-kube-api-access-c5hg5\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-jgpvr\" (UID: \"80cb92f6-d3a1-44dc-96de-ce408815087a\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-jgpvr" Feb 18 01:12:16 crc kubenswrapper[4791]: I0218 01:12:16.213017 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/80cb92f6-d3a1-44dc-96de-ce408815087a-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-jgpvr\" (UID: \"80cb92f6-d3a1-44dc-96de-ce408815087a\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-jgpvr" Feb 18 01:12:16 crc kubenswrapper[4791]: I0218 01:12:16.219509 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/80cb92f6-d3a1-44dc-96de-ce408815087a-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-jgpvr\" (UID: \"80cb92f6-d3a1-44dc-96de-ce408815087a\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-jgpvr" Feb 18 01:12:16 crc kubenswrapper[4791]: I0218 01:12:16.219869 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/80cb92f6-d3a1-44dc-96de-ce408815087a-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-jgpvr\" (UID: \"80cb92f6-d3a1-44dc-96de-ce408815087a\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-jgpvr" Feb 18 01:12:16 crc kubenswrapper[4791]: I0218 01:12:16.244855 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c5hg5\" (UniqueName: \"kubernetes.io/projected/80cb92f6-d3a1-44dc-96de-ce408815087a-kube-api-access-c5hg5\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-jgpvr\" (UID: \"80cb92f6-d3a1-44dc-96de-ce408815087a\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-jgpvr" Feb 18 01:12:16 crc kubenswrapper[4791]: I0218 01:12:16.361562 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-jgpvr" Feb 18 01:12:16 crc kubenswrapper[4791]: W0218 01:12:16.900664 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod80cb92f6_d3a1_44dc_96de_ce408815087a.slice/crio-3c82d5719234644a5e64538768a41250a7a3722214b398740c70b1bdae558b94 WatchSource:0}: Error finding container 3c82d5719234644a5e64538768a41250a7a3722214b398740c70b1bdae558b94: Status 404 returned error can't find the container with id 3c82d5719234644a5e64538768a41250a7a3722214b398740c70b1bdae558b94 Feb 18 01:12:16 crc kubenswrapper[4791]: I0218 01:12:16.906297 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-jgpvr"] Feb 18 01:12:17 crc kubenswrapper[4791]: I0218 01:12:17.871819 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-jgpvr" event={"ID":"80cb92f6-d3a1-44dc-96de-ce408815087a","Type":"ContainerStarted","Data":"b0c2b2b054dab52febe1671d24a91e5a193e26b86cf446d77aa49dcd3ab0bd4d"} Feb 18 01:12:17 crc kubenswrapper[4791]: I0218 01:12:17.872690 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-jgpvr" event={"ID":"80cb92f6-d3a1-44dc-96de-ce408815087a","Type":"ContainerStarted","Data":"3c82d5719234644a5e64538768a41250a7a3722214b398740c70b1bdae558b94"} Feb 18 01:12:17 crc kubenswrapper[4791]: I0218 01:12:17.897651 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-jgpvr" podStartSLOduration=1.278181642 podStartE2EDuration="1.897623189s" podCreationTimestamp="2026-02-18 01:12:16 +0000 UTC" firstStartedPulling="2026-02-18 01:12:16.903323175 +0000 UTC m=+2278.471336345" lastFinishedPulling="2026-02-18 01:12:17.522764722 +0000 UTC m=+2279.090777892" observedRunningTime="2026-02-18 01:12:17.890034066 +0000 UTC m=+2279.458047266" watchObservedRunningTime="2026-02-18 01:12:17.897623189 +0000 UTC m=+2279.465636379" Feb 18 01:12:18 crc kubenswrapper[4791]: E0218 01:12:18.063223 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:12:25 crc kubenswrapper[4791]: E0218 01:12:25.154722 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 01:12:25 crc kubenswrapper[4791]: E0218 01:12:25.155375 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 01:12:25 crc kubenswrapper[4791]: E0218 01:12:25.155518 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndh699h566hcch79h59h595h58bhb6h7fh594h67fh546h668h56hc8h6chd5h94h599h587hddh64bh57bhc9h68fh7dh57bh65dh64fh68dh59dq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-np7gr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(4b9cec47-aeda-40f0-b83e-46f09ce65e95): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 18 01:12:25 crc kubenswrapper[4791]: E0218 01:12:25.156706 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:12:30 crc kubenswrapper[4791]: E0218 01:12:30.065027 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:12:38 crc kubenswrapper[4791]: E0218 01:12:38.065405 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:12:44 crc kubenswrapper[4791]: E0218 01:12:44.062904 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:12:53 crc kubenswrapper[4791]: E0218 01:12:53.065088 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:12:59 crc kubenswrapper[4791]: E0218 01:12:59.071270 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:13:08 crc kubenswrapper[4791]: E0218 01:13:08.064557 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:13:14 crc kubenswrapper[4791]: E0218 01:13:14.064460 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:13:19 crc kubenswrapper[4791]: E0218 01:13:19.071404 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:13:26 crc kubenswrapper[4791]: E0218 01:13:26.064715 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:13:26 crc kubenswrapper[4791]: I0218 01:13:26.799650 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 01:13:26 crc kubenswrapper[4791]: I0218 01:13:26.799714 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 01:13:30 crc kubenswrapper[4791]: E0218 01:13:30.063797 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:13:37 crc kubenswrapper[4791]: E0218 01:13:37.063862 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:13:41 crc kubenswrapper[4791]: E0218 01:13:41.063298 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:13:50 crc kubenswrapper[4791]: E0218 01:13:50.063389 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:13:54 crc kubenswrapper[4791]: E0218 01:13:54.063625 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:13:56 crc kubenswrapper[4791]: I0218 01:13:56.800733 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 01:13:56 crc kubenswrapper[4791]: I0218 01:13:56.801098 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" 
containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 01:14:04 crc kubenswrapper[4791]: E0218 01:14:04.065089 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:14:05 crc kubenswrapper[4791]: E0218 01:14:05.064272 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:14:19 crc kubenswrapper[4791]: E0218 01:14:19.085047 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:14:19 crc kubenswrapper[4791]: E0218 01:14:19.085562 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:14:26 crc kubenswrapper[4791]: I0218 01:14:26.799975 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 01:14:26 crc kubenswrapper[4791]: I0218 01:14:26.800669 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 01:14:26 crc kubenswrapper[4791]: I0218 01:14:26.800727 4791 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" Feb 18 01:14:26 crc kubenswrapper[4791]: I0218 01:14:26.801880 4791 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ab05f5302892c0c3c036416d2782c56649d1db2dfbd76a18bbb4188e41a054e1"} pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 18 01:14:26 crc kubenswrapper[4791]: I0218 01:14:26.801981 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" containerID="cri-o://ab05f5302892c0c3c036416d2782c56649d1db2dfbd76a18bbb4188e41a054e1" gracePeriod=600 Feb 
18 01:14:26 crc kubenswrapper[4791]: E0218 01:14:26.931616 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:14:27 crc kubenswrapper[4791]: I0218 01:14:27.387763 4791 generic.go:334] "Generic (PLEG): container finished" podID="0b31a333-8f95-459c-8135-e91e557c4c85" containerID="ab05f5302892c0c3c036416d2782c56649d1db2dfbd76a18bbb4188e41a054e1" exitCode=0 Feb 18 01:14:27 crc kubenswrapper[4791]: I0218 01:14:27.387811 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerDied","Data":"ab05f5302892c0c3c036416d2782c56649d1db2dfbd76a18bbb4188e41a054e1"} Feb 18 01:14:27 crc kubenswrapper[4791]: I0218 01:14:27.387854 4791 scope.go:117] "RemoveContainer" containerID="0e5a0c0f5de93c38ae18d4c74a60d865d534e2daf4ef3ad9f6d4657064cadfe2" Feb 18 01:14:27 crc kubenswrapper[4791]: I0218 01:14:27.388604 4791 scope.go:117] "RemoveContainer" containerID="ab05f5302892c0c3c036416d2782c56649d1db2dfbd76a18bbb4188e41a054e1" Feb 18 01:14:27 crc kubenswrapper[4791]: E0218 01:14:27.389309 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:14:31 crc kubenswrapper[4791]: E0218 01:14:31.067698 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:14:32 crc kubenswrapper[4791]: E0218 01:14:32.063939 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:14:38 crc kubenswrapper[4791]: I0218 01:14:38.063590 4791 scope.go:117] "RemoveContainer" containerID="ab05f5302892c0c3c036416d2782c56649d1db2dfbd76a18bbb4188e41a054e1" Feb 18 01:14:38 crc kubenswrapper[4791]: E0218 01:14:38.064332 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:14:43 crc kubenswrapper[4791]: E0218 01:14:43.068880 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:14:45 crc kubenswrapper[4791]: E0218 01:14:45.065290 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:14:52 crc kubenswrapper[4791]: I0218 01:14:52.061179 4791 scope.go:117] "RemoveContainer" containerID="ab05f5302892c0c3c036416d2782c56649d1db2dfbd76a18bbb4188e41a054e1" Feb 18 01:14:52 crc kubenswrapper[4791]: E0218 01:14:52.061910 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:14:58 crc kubenswrapper[4791]: E0218 01:14:58.063097 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:14:58 crc kubenswrapper[4791]: E0218 01:14:58.063098 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:15:00 crc kubenswrapper[4791]: I0218 01:15:00.161248 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29522955-hqzql"] Feb 18 01:15:00 crc kubenswrapper[4791]: I0218 01:15:00.167016 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29522955-hqzql" Feb 18 01:15:00 crc kubenswrapper[4791]: I0218 01:15:00.171317 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Feb 18 01:15:00 crc kubenswrapper[4791]: I0218 01:15:00.173430 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Feb 18 01:15:00 crc kubenswrapper[4791]: I0218 01:15:00.188238 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29522955-hqzql"] Feb 18 01:15:00 crc kubenswrapper[4791]: I0218 01:15:00.308797 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6494cc9c-ba04-49f3-bd9a-11dba584dea7-config-volume\") pod \"collect-profiles-29522955-hqzql\" (UID: \"6494cc9c-ba04-49f3-bd9a-11dba584dea7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522955-hqzql" Feb 18 01:15:00 crc kubenswrapper[4791]: I0218 01:15:00.308868 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tzhtp\" (UniqueName: \"kubernetes.io/projected/6494cc9c-ba04-49f3-bd9a-11dba584dea7-kube-api-access-tzhtp\") pod \"collect-profiles-29522955-hqzql\" (UID: \"6494cc9c-ba04-49f3-bd9a-11dba584dea7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522955-hqzql" Feb 18 01:15:00 crc kubenswrapper[4791]: I0218 01:15:00.308940 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6494cc9c-ba04-49f3-bd9a-11dba584dea7-secret-volume\") pod \"collect-profiles-29522955-hqzql\" (UID: \"6494cc9c-ba04-49f3-bd9a-11dba584dea7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522955-hqzql" Feb 18 01:15:00 crc kubenswrapper[4791]: I0218 01:15:00.410719 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6494cc9c-ba04-49f3-bd9a-11dba584dea7-config-volume\") pod \"collect-profiles-29522955-hqzql\" (UID: \"6494cc9c-ba04-49f3-bd9a-11dba584dea7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522955-hqzql" Feb 18 01:15:00 crc kubenswrapper[4791]: I0218 01:15:00.410764 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tzhtp\" (UniqueName: \"kubernetes.io/projected/6494cc9c-ba04-49f3-bd9a-11dba584dea7-kube-api-access-tzhtp\") pod \"collect-profiles-29522955-hqzql\" (UID: \"6494cc9c-ba04-49f3-bd9a-11dba584dea7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522955-hqzql" Feb 18 01:15:00 crc kubenswrapper[4791]: I0218 01:15:00.410836 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6494cc9c-ba04-49f3-bd9a-11dba584dea7-secret-volume\") pod \"collect-profiles-29522955-hqzql\" (UID: \"6494cc9c-ba04-49f3-bd9a-11dba584dea7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522955-hqzql" Feb 18 01:15:00 crc kubenswrapper[4791]: I0218 01:15:00.411726 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6494cc9c-ba04-49f3-bd9a-11dba584dea7-config-volume\") pod 
\"collect-profiles-29522955-hqzql\" (UID: \"6494cc9c-ba04-49f3-bd9a-11dba584dea7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522955-hqzql" Feb 18 01:15:00 crc kubenswrapper[4791]: I0218 01:15:00.417861 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6494cc9c-ba04-49f3-bd9a-11dba584dea7-secret-volume\") pod \"collect-profiles-29522955-hqzql\" (UID: \"6494cc9c-ba04-49f3-bd9a-11dba584dea7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522955-hqzql" Feb 18 01:15:00 crc kubenswrapper[4791]: I0218 01:15:00.429624 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tzhtp\" (UniqueName: \"kubernetes.io/projected/6494cc9c-ba04-49f3-bd9a-11dba584dea7-kube-api-access-tzhtp\") pod \"collect-profiles-29522955-hqzql\" (UID: \"6494cc9c-ba04-49f3-bd9a-11dba584dea7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522955-hqzql" Feb 18 01:15:00 crc kubenswrapper[4791]: I0218 01:15:00.492680 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29522955-hqzql" Feb 18 01:15:00 crc kubenswrapper[4791]: I0218 01:15:00.991552 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29522955-hqzql"] Feb 18 01:15:01 crc kubenswrapper[4791]: I0218 01:15:01.779020 4791 generic.go:334] "Generic (PLEG): container finished" podID="6494cc9c-ba04-49f3-bd9a-11dba584dea7" containerID="ee848c3633205542201b3615c1e669532006a61f6dd003fa73cc7fc4971989ce" exitCode=0 Feb 18 01:15:01 crc kubenswrapper[4791]: I0218 01:15:01.779126 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29522955-hqzql" event={"ID":"6494cc9c-ba04-49f3-bd9a-11dba584dea7","Type":"ContainerDied","Data":"ee848c3633205542201b3615c1e669532006a61f6dd003fa73cc7fc4971989ce"} Feb 18 01:15:01 crc kubenswrapper[4791]: I0218 01:15:01.779398 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29522955-hqzql" event={"ID":"6494cc9c-ba04-49f3-bd9a-11dba584dea7","Type":"ContainerStarted","Data":"839481e7d510c3b019ddf54cef0b9e5bcc92cefd33491c6080e9781967238bf3"} Feb 18 01:15:03 crc kubenswrapper[4791]: I0218 01:15:03.235844 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29522955-hqzql" Feb 18 01:15:03 crc kubenswrapper[4791]: I0218 01:15:03.385772 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6494cc9c-ba04-49f3-bd9a-11dba584dea7-secret-volume\") pod \"6494cc9c-ba04-49f3-bd9a-11dba584dea7\" (UID: \"6494cc9c-ba04-49f3-bd9a-11dba584dea7\") " Feb 18 01:15:03 crc kubenswrapper[4791]: I0218 01:15:03.385988 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6494cc9c-ba04-49f3-bd9a-11dba584dea7-config-volume\") pod \"6494cc9c-ba04-49f3-bd9a-11dba584dea7\" (UID: \"6494cc9c-ba04-49f3-bd9a-11dba584dea7\") " Feb 18 01:15:03 crc kubenswrapper[4791]: I0218 01:15:03.386069 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tzhtp\" (UniqueName: \"kubernetes.io/projected/6494cc9c-ba04-49f3-bd9a-11dba584dea7-kube-api-access-tzhtp\") pod \"6494cc9c-ba04-49f3-bd9a-11dba584dea7\" (UID: \"6494cc9c-ba04-49f3-bd9a-11dba584dea7\") " Feb 18 01:15:03 crc kubenswrapper[4791]: I0218 01:15:03.386732 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6494cc9c-ba04-49f3-bd9a-11dba584dea7-config-volume" (OuterVolumeSpecName: "config-volume") pod "6494cc9c-ba04-49f3-bd9a-11dba584dea7" (UID: "6494cc9c-ba04-49f3-bd9a-11dba584dea7"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 01:15:03 crc kubenswrapper[4791]: I0218 01:15:03.392497 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6494cc9c-ba04-49f3-bd9a-11dba584dea7-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "6494cc9c-ba04-49f3-bd9a-11dba584dea7" (UID: "6494cc9c-ba04-49f3-bd9a-11dba584dea7"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:15:03 crc kubenswrapper[4791]: I0218 01:15:03.395408 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6494cc9c-ba04-49f3-bd9a-11dba584dea7-kube-api-access-tzhtp" (OuterVolumeSpecName: "kube-api-access-tzhtp") pod "6494cc9c-ba04-49f3-bd9a-11dba584dea7" (UID: "6494cc9c-ba04-49f3-bd9a-11dba584dea7"). InnerVolumeSpecName "kube-api-access-tzhtp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:15:03 crc kubenswrapper[4791]: I0218 01:15:03.489048 4791 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6494cc9c-ba04-49f3-bd9a-11dba584dea7-secret-volume\") on node \"crc\" DevicePath \"\"" Feb 18 01:15:03 crc kubenswrapper[4791]: I0218 01:15:03.489091 4791 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6494cc9c-ba04-49f3-bd9a-11dba584dea7-config-volume\") on node \"crc\" DevicePath \"\"" Feb 18 01:15:03 crc kubenswrapper[4791]: I0218 01:15:03.489102 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tzhtp\" (UniqueName: \"kubernetes.io/projected/6494cc9c-ba04-49f3-bd9a-11dba584dea7-kube-api-access-tzhtp\") on node \"crc\" DevicePath \"\"" Feb 18 01:15:03 crc kubenswrapper[4791]: I0218 01:15:03.800476 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29522955-hqzql" event={"ID":"6494cc9c-ba04-49f3-bd9a-11dba584dea7","Type":"ContainerDied","Data":"839481e7d510c3b019ddf54cef0b9e5bcc92cefd33491c6080e9781967238bf3"} Feb 18 01:15:03 crc kubenswrapper[4791]: I0218 01:15:03.800869 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="839481e7d510c3b019ddf54cef0b9e5bcc92cefd33491c6080e9781967238bf3" Feb 18 01:15:03 crc kubenswrapper[4791]: I0218 01:15:03.800526 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29522955-hqzql" Feb 18 01:15:04 crc kubenswrapper[4791]: I0218 01:15:04.312860 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29522910-zlz2k"] Feb 18 01:15:04 crc kubenswrapper[4791]: I0218 01:15:04.321835 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29522910-zlz2k"] Feb 18 01:15:05 crc kubenswrapper[4791]: I0218 01:15:05.061976 4791 scope.go:117] "RemoveContainer" containerID="ab05f5302892c0c3c036416d2782c56649d1db2dfbd76a18bbb4188e41a054e1" Feb 18 01:15:05 crc kubenswrapper[4791]: E0218 01:15:05.063116 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:15:05 crc kubenswrapper[4791]: I0218 01:15:05.084144 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02" path="/var/lib/kubelet/pods/eb9dc76e-0a8f-4eb8-bb8d-8b101b51ff02/volumes" Feb 18 01:15:12 crc kubenswrapper[4791]: E0218 01:15:12.064477 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:15:13 crc kubenswrapper[4791]: E0218 01:15:13.064400 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with 
ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:15:17 crc kubenswrapper[4791]: I0218 01:15:17.062127 4791 scope.go:117] "RemoveContainer" containerID="ab05f5302892c0c3c036416d2782c56649d1db2dfbd76a18bbb4188e41a054e1" Feb 18 01:15:17 crc kubenswrapper[4791]: E0218 01:15:17.063437 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:15:23 crc kubenswrapper[4791]: E0218 01:15:23.066909 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:15:25 crc kubenswrapper[4791]: E0218 01:15:25.063415 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:15:31 crc kubenswrapper[4791]: I0218 01:15:31.061897 4791 scope.go:117] "RemoveContainer" containerID="ab05f5302892c0c3c036416d2782c56649d1db2dfbd76a18bbb4188e41a054e1" Feb 18 01:15:31 crc kubenswrapper[4791]: E0218 01:15:31.062755 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:15:33 crc kubenswrapper[4791]: I0218 01:15:33.282853 4791 scope.go:117] "RemoveContainer" containerID="4205f4b881d0c587d0aaf43aa7185653b959a5c4f9c11d22f324efb0c8116a2e" Feb 18 01:15:37 crc kubenswrapper[4791]: E0218 01:15:37.067420 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:15:38 crc kubenswrapper[4791]: E0218 01:15:38.064507 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:15:45 crc kubenswrapper[4791]: I0218 01:15:45.062535 4791 scope.go:117] "RemoveContainer" containerID="ab05f5302892c0c3c036416d2782c56649d1db2dfbd76a18bbb4188e41a054e1" Feb 18 01:15:45 crc 
kubenswrapper[4791]: E0218 01:15:45.063697 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:15:50 crc kubenswrapper[4791]: E0218 01:15:50.064104 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:15:51 crc kubenswrapper[4791]: E0218 01:15:51.063687 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:15:58 crc kubenswrapper[4791]: I0218 01:15:58.061727 4791 scope.go:117] "RemoveContainer" containerID="ab05f5302892c0c3c036416d2782c56649d1db2dfbd76a18bbb4188e41a054e1" Feb 18 01:15:58 crc kubenswrapper[4791]: E0218 01:15:58.062513 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:16:02 crc kubenswrapper[4791]: E0218 01:16:02.064273 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:16:03 crc kubenswrapper[4791]: E0218 01:16:03.065806 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:16:12 crc kubenswrapper[4791]: I0218 01:16:12.061676 4791 scope.go:117] "RemoveContainer" containerID="ab05f5302892c0c3c036416d2782c56649d1db2dfbd76a18bbb4188e41a054e1" Feb 18 01:16:12 crc kubenswrapper[4791]: E0218 01:16:12.062586 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:16:16 crc kubenswrapper[4791]: E0218 01:16:16.064762 4791 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:16:18 crc kubenswrapper[4791]: E0218 01:16:18.068719 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:16:23 crc kubenswrapper[4791]: I0218 01:16:23.062811 4791 scope.go:117] "RemoveContainer" containerID="ab05f5302892c0c3c036416d2782c56649d1db2dfbd76a18bbb4188e41a054e1" Feb 18 01:16:23 crc kubenswrapper[4791]: E0218 01:16:23.065130 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:16:27 crc kubenswrapper[4791]: E0218 01:16:27.337764 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:16:29 crc kubenswrapper[4791]: E0218 01:16:29.069544 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:16:34 crc kubenswrapper[4791]: I0218 01:16:34.063743 4791 scope.go:117] "RemoveContainer" containerID="ab05f5302892c0c3c036416d2782c56649d1db2dfbd76a18bbb4188e41a054e1" Feb 18 01:16:34 crc kubenswrapper[4791]: E0218 01:16:34.064916 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:16:41 crc kubenswrapper[4791]: E0218 01:16:41.065083 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:16:43 crc kubenswrapper[4791]: I0218 01:16:43.073580 4791 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 18 01:16:43 crc kubenswrapper[4791]: E0218 01:16:43.202520 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = 
Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 01:16:43 crc kubenswrapper[4791]: E0218 01:16:43.203020 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 01:16:43 crc kubenswrapper[4791]: E0218 01:16:43.203222 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lgn7d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-scphk_openstack(68e5e8d6-5771-4045-858a-4a39b2db99f9): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Feb 18 01:16:43 crc kubenswrapper[4791]: E0218 01:16:43.204462 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:16:49 crc kubenswrapper[4791]: I0218 01:16:49.071774 4791 scope.go:117] "RemoveContainer" containerID="ab05f5302892c0c3c036416d2782c56649d1db2dfbd76a18bbb4188e41a054e1" Feb 18 01:16:49 crc kubenswrapper[4791]: E0218 01:16:49.072621 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:16:54 crc kubenswrapper[4791]: E0218 01:16:54.062919 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:16:59 crc kubenswrapper[4791]: E0218 01:16:59.071101 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:17:04 crc kubenswrapper[4791]: I0218 01:17:04.062204 4791 scope.go:117] "RemoveContainer" containerID="ab05f5302892c0c3c036416d2782c56649d1db2dfbd76a18bbb4188e41a054e1" Feb 18 01:17:04 crc kubenswrapper[4791]: E0218 01:17:04.063248 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:17:08 crc kubenswrapper[4791]: E0218 01:17:08.068000 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:17:12 crc kubenswrapper[4791]: E0218 01:17:12.063639 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" 
podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:17:19 crc kubenswrapper[4791]: I0218 01:17:19.068079 4791 scope.go:117] "RemoveContainer" containerID="ab05f5302892c0c3c036416d2782c56649d1db2dfbd76a18bbb4188e41a054e1" Feb 18 01:17:19 crc kubenswrapper[4791]: E0218 01:17:19.068917 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:17:20 crc kubenswrapper[4791]: E0218 01:17:20.064401 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:17:27 crc kubenswrapper[4791]: E0218 01:17:27.066183 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:17:30 crc kubenswrapper[4791]: I0218 01:17:30.920296 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-pj8g9"] Feb 18 01:17:30 crc kubenswrapper[4791]: E0218 01:17:30.921410 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6494cc9c-ba04-49f3-bd9a-11dba584dea7" containerName="collect-profiles" Feb 18 01:17:30 crc kubenswrapper[4791]: I0218 01:17:30.921424 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="6494cc9c-ba04-49f3-bd9a-11dba584dea7" containerName="collect-profiles" Feb 18 01:17:30 crc kubenswrapper[4791]: I0218 01:17:30.921678 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="6494cc9c-ba04-49f3-bd9a-11dba584dea7" containerName="collect-profiles" Feb 18 01:17:30 crc kubenswrapper[4791]: I0218 01:17:30.923225 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-pj8g9" Feb 18 01:17:30 crc kubenswrapper[4791]: I0218 01:17:30.941818 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pj8g9"] Feb 18 01:17:31 crc kubenswrapper[4791]: I0218 01:17:31.071804 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g568l\" (UniqueName: \"kubernetes.io/projected/87c6b391-56e6-4758-bbd1-91d05dca1478-kube-api-access-g568l\") pod \"community-operators-pj8g9\" (UID: \"87c6b391-56e6-4758-bbd1-91d05dca1478\") " pod="openshift-marketplace/community-operators-pj8g9" Feb 18 01:17:31 crc kubenswrapper[4791]: I0218 01:17:31.072439 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87c6b391-56e6-4758-bbd1-91d05dca1478-catalog-content\") pod \"community-operators-pj8g9\" (UID: \"87c6b391-56e6-4758-bbd1-91d05dca1478\") " pod="openshift-marketplace/community-operators-pj8g9" Feb 18 01:17:31 crc kubenswrapper[4791]: I0218 01:17:31.072643 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87c6b391-56e6-4758-bbd1-91d05dca1478-utilities\") pod \"community-operators-pj8g9\" (UID: \"87c6b391-56e6-4758-bbd1-91d05dca1478\") " pod="openshift-marketplace/community-operators-pj8g9" Feb 18 01:17:31 crc kubenswrapper[4791]: I0218 01:17:31.120781 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-2g4lz"] Feb 18 01:17:31 crc kubenswrapper[4791]: I0218 01:17:31.147331 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2g4lz"] Feb 18 01:17:31 crc kubenswrapper[4791]: I0218 01:17:31.147447 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2g4lz" Feb 18 01:17:31 crc kubenswrapper[4791]: I0218 01:17:31.174880 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g568l\" (UniqueName: \"kubernetes.io/projected/87c6b391-56e6-4758-bbd1-91d05dca1478-kube-api-access-g568l\") pod \"community-operators-pj8g9\" (UID: \"87c6b391-56e6-4758-bbd1-91d05dca1478\") " pod="openshift-marketplace/community-operators-pj8g9" Feb 18 01:17:31 crc kubenswrapper[4791]: I0218 01:17:31.175103 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87c6b391-56e6-4758-bbd1-91d05dca1478-catalog-content\") pod \"community-operators-pj8g9\" (UID: \"87c6b391-56e6-4758-bbd1-91d05dca1478\") " pod="openshift-marketplace/community-operators-pj8g9" Feb 18 01:17:31 crc kubenswrapper[4791]: I0218 01:17:31.175198 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87c6b391-56e6-4758-bbd1-91d05dca1478-utilities\") pod \"community-operators-pj8g9\" (UID: \"87c6b391-56e6-4758-bbd1-91d05dca1478\") " pod="openshift-marketplace/community-operators-pj8g9" Feb 18 01:17:31 crc kubenswrapper[4791]: I0218 01:17:31.176797 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87c6b391-56e6-4758-bbd1-91d05dca1478-catalog-content\") pod \"community-operators-pj8g9\" (UID: \"87c6b391-56e6-4758-bbd1-91d05dca1478\") " pod="openshift-marketplace/community-operators-pj8g9" Feb 18 01:17:31 crc kubenswrapper[4791]: I0218 01:17:31.176918 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87c6b391-56e6-4758-bbd1-91d05dca1478-utilities\") pod \"community-operators-pj8g9\" (UID: \"87c6b391-56e6-4758-bbd1-91d05dca1478\") " pod="openshift-marketplace/community-operators-pj8g9" Feb 18 01:17:31 crc kubenswrapper[4791]: I0218 01:17:31.199262 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g568l\" (UniqueName: \"kubernetes.io/projected/87c6b391-56e6-4758-bbd1-91d05dca1478-kube-api-access-g568l\") pod \"community-operators-pj8g9\" (UID: \"87c6b391-56e6-4758-bbd1-91d05dca1478\") " pod="openshift-marketplace/community-operators-pj8g9" Feb 18 01:17:31 crc kubenswrapper[4791]: I0218 01:17:31.246752 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-pj8g9" Feb 18 01:17:31 crc kubenswrapper[4791]: I0218 01:17:31.281931 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f90a5f9e-114f-4b1c-83fb-698657f3845a-utilities\") pod \"certified-operators-2g4lz\" (UID: \"f90a5f9e-114f-4b1c-83fb-698657f3845a\") " pod="openshift-marketplace/certified-operators-2g4lz" Feb 18 01:17:31 crc kubenswrapper[4791]: I0218 01:17:31.281988 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f90a5f9e-114f-4b1c-83fb-698657f3845a-catalog-content\") pod \"certified-operators-2g4lz\" (UID: \"f90a5f9e-114f-4b1c-83fb-698657f3845a\") " pod="openshift-marketplace/certified-operators-2g4lz" Feb 18 01:17:31 crc kubenswrapper[4791]: I0218 01:17:31.282424 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6wwdh\" (UniqueName: \"kubernetes.io/projected/f90a5f9e-114f-4b1c-83fb-698657f3845a-kube-api-access-6wwdh\") pod \"certified-operators-2g4lz\" (UID: \"f90a5f9e-114f-4b1c-83fb-698657f3845a\") " pod="openshift-marketplace/certified-operators-2g4lz" Feb 18 01:17:31 crc kubenswrapper[4791]: I0218 01:17:31.385291 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f90a5f9e-114f-4b1c-83fb-698657f3845a-utilities\") pod \"certified-operators-2g4lz\" (UID: \"f90a5f9e-114f-4b1c-83fb-698657f3845a\") " pod="openshift-marketplace/certified-operators-2g4lz" Feb 18 01:17:31 crc kubenswrapper[4791]: I0218 01:17:31.385344 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f90a5f9e-114f-4b1c-83fb-698657f3845a-catalog-content\") pod \"certified-operators-2g4lz\" (UID: \"f90a5f9e-114f-4b1c-83fb-698657f3845a\") " pod="openshift-marketplace/certified-operators-2g4lz" Feb 18 01:17:31 crc kubenswrapper[4791]: I0218 01:17:31.385476 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6wwdh\" (UniqueName: \"kubernetes.io/projected/f90a5f9e-114f-4b1c-83fb-698657f3845a-kube-api-access-6wwdh\") pod \"certified-operators-2g4lz\" (UID: \"f90a5f9e-114f-4b1c-83fb-698657f3845a\") " pod="openshift-marketplace/certified-operators-2g4lz" Feb 18 01:17:31 crc kubenswrapper[4791]: I0218 01:17:31.385780 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f90a5f9e-114f-4b1c-83fb-698657f3845a-utilities\") pod \"certified-operators-2g4lz\" (UID: \"f90a5f9e-114f-4b1c-83fb-698657f3845a\") " pod="openshift-marketplace/certified-operators-2g4lz" Feb 18 01:17:31 crc kubenswrapper[4791]: I0218 01:17:31.386179 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f90a5f9e-114f-4b1c-83fb-698657f3845a-catalog-content\") pod \"certified-operators-2g4lz\" (UID: \"f90a5f9e-114f-4b1c-83fb-698657f3845a\") " pod="openshift-marketplace/certified-operators-2g4lz" Feb 18 01:17:31 crc kubenswrapper[4791]: I0218 01:17:31.403969 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6wwdh\" (UniqueName: \"kubernetes.io/projected/f90a5f9e-114f-4b1c-83fb-698657f3845a-kube-api-access-6wwdh\") pod 
\"certified-operators-2g4lz\" (UID: \"f90a5f9e-114f-4b1c-83fb-698657f3845a\") " pod="openshift-marketplace/certified-operators-2g4lz" Feb 18 01:17:31 crc kubenswrapper[4791]: I0218 01:17:31.475746 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2g4lz" Feb 18 01:17:31 crc kubenswrapper[4791]: I0218 01:17:31.879147 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pj8g9"] Feb 18 01:17:32 crc kubenswrapper[4791]: W0218 01:17:32.057956 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf90a5f9e_114f_4b1c_83fb_698657f3845a.slice/crio-ef980beaaeb292016c1c5edcf28ec8fa32005b1f41f1e26d84965d2d3217a953 WatchSource:0}: Error finding container ef980beaaeb292016c1c5edcf28ec8fa32005b1f41f1e26d84965d2d3217a953: Status 404 returned error can't find the container with id ef980beaaeb292016c1c5edcf28ec8fa32005b1f41f1e26d84965d2d3217a953 Feb 18 01:17:32 crc kubenswrapper[4791]: I0218 01:17:32.063305 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2g4lz"] Feb 18 01:17:32 crc kubenswrapper[4791]: I0218 01:17:32.730910 4791 generic.go:334] "Generic (PLEG): container finished" podID="87c6b391-56e6-4758-bbd1-91d05dca1478" containerID="b88be4a1abfa014be798e361c697a99713720f30934ce176e286e65d83ee9b13" exitCode=0 Feb 18 01:17:32 crc kubenswrapper[4791]: I0218 01:17:32.731008 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pj8g9" event={"ID":"87c6b391-56e6-4758-bbd1-91d05dca1478","Type":"ContainerDied","Data":"b88be4a1abfa014be798e361c697a99713720f30934ce176e286e65d83ee9b13"} Feb 18 01:17:32 crc kubenswrapper[4791]: I0218 01:17:32.732374 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pj8g9" event={"ID":"87c6b391-56e6-4758-bbd1-91d05dca1478","Type":"ContainerStarted","Data":"9b7bde51286349739b82b22084cc539fe4be5c9afeba7e14984e09fa56cc1381"} Feb 18 01:17:32 crc kubenswrapper[4791]: I0218 01:17:32.737149 4791 generic.go:334] "Generic (PLEG): container finished" podID="f90a5f9e-114f-4b1c-83fb-698657f3845a" containerID="73827bb8e9103b0a1b1464977e0becc8fc1c802b2980b73e7c77962eccc983e5" exitCode=0 Feb 18 01:17:32 crc kubenswrapper[4791]: I0218 01:17:32.737223 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2g4lz" event={"ID":"f90a5f9e-114f-4b1c-83fb-698657f3845a","Type":"ContainerDied","Data":"73827bb8e9103b0a1b1464977e0becc8fc1c802b2980b73e7c77962eccc983e5"} Feb 18 01:17:32 crc kubenswrapper[4791]: I0218 01:17:32.737264 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2g4lz" event={"ID":"f90a5f9e-114f-4b1c-83fb-698657f3845a","Type":"ContainerStarted","Data":"ef980beaaeb292016c1c5edcf28ec8fa32005b1f41f1e26d84965d2d3217a953"} Feb 18 01:17:33 crc kubenswrapper[4791]: I0218 01:17:33.750258 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pj8g9" event={"ID":"87c6b391-56e6-4758-bbd1-91d05dca1478","Type":"ContainerStarted","Data":"e4a6a4af5eb6664fb05ea5b32f9f2d1192a982a19e1c9c885567522e532cdecd"} Feb 18 01:17:34 crc kubenswrapper[4791]: I0218 01:17:34.062191 4791 scope.go:117] "RemoveContainer" containerID="ab05f5302892c0c3c036416d2782c56649d1db2dfbd76a18bbb4188e41a054e1" Feb 18 01:17:34 crc 
kubenswrapper[4791]: E0218 01:17:34.062655 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:17:35 crc kubenswrapper[4791]: E0218 01:17:35.194484 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 01:17:35 crc kubenswrapper[4791]: E0218 01:17:35.194927 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 01:17:35 crc kubenswrapper[4791]: E0218 01:17:35.195148 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndh699h566hcch79h59h595h58bhb6h7fh594h67fh546h668h56hc8h6chd5h94h599h587hddh64bh57bhc9h68fh7dh57bh65dh64fh68dh59dq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-np7gr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(4b9cec47-aeda-40f0-b83e-46f09ce65e95): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 18 01:17:35 crc kubenswrapper[4791]: E0218 01:17:35.196433 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:17:35 crc kubenswrapper[4791]: I0218 01:17:35.775611 4791 generic.go:334] "Generic (PLEG): container finished" podID="87c6b391-56e6-4758-bbd1-91d05dca1478" containerID="e4a6a4af5eb6664fb05ea5b32f9f2d1192a982a19e1c9c885567522e532cdecd" exitCode=0 Feb 18 01:17:35 crc kubenswrapper[4791]: I0218 01:17:35.775678 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pj8g9" event={"ID":"87c6b391-56e6-4758-bbd1-91d05dca1478","Type":"ContainerDied","Data":"e4a6a4af5eb6664fb05ea5b32f9f2d1192a982a19e1c9c885567522e532cdecd"} Feb 18 01:17:36 crc kubenswrapper[4791]: I0218 01:17:36.791957 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pj8g9" event={"ID":"87c6b391-56e6-4758-bbd1-91d05dca1478","Type":"ContainerStarted","Data":"5a7fb46afd2a426a40bc4038f86714c46dfd4e7828d3f3e0b3e899effcb48314"} Feb 18 01:17:36 crc kubenswrapper[4791]: I0218 01:17:36.818426 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-pj8g9" podStartSLOduration=3.299579042 podStartE2EDuration="6.818401784s" podCreationTimestamp="2026-02-18 01:17:30 +0000 UTC" firstStartedPulling="2026-02-18 01:17:32.733913861 +0000 UTC m=+2594.301927031" lastFinishedPulling="2026-02-18 01:17:36.252736603 +0000 UTC m=+2597.820749773" observedRunningTime="2026-02-18 01:17:36.814934667 +0000 UTC m=+2598.382947857" watchObservedRunningTime="2026-02-18 01:17:36.818401784 +0000 UTC m=+2598.386414984" Feb 18 01:17:38 crc kubenswrapper[4791]: E0218 01:17:38.063490 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:17:40 crc kubenswrapper[4791]: I0218 01:17:40.849741 4791 generic.go:334] "Generic (PLEG): container finished" podID="f90a5f9e-114f-4b1c-83fb-698657f3845a" containerID="46d3b77f8ae1a29f499d4b2a3c79df80e2cbcbc801da64426160fabe29d05a00" exitCode=0 Feb 18 01:17:40 crc kubenswrapper[4791]: I0218 01:17:40.849858 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2g4lz" event={"ID":"f90a5f9e-114f-4b1c-83fb-698657f3845a","Type":"ContainerDied","Data":"46d3b77f8ae1a29f499d4b2a3c79df80e2cbcbc801da64426160fabe29d05a00"} Feb 18 01:17:41 crc kubenswrapper[4791]: I0218 01:17:41.247844 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-pj8g9" Feb 18 01:17:41 crc kubenswrapper[4791]: I0218 01:17:41.247917 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-pj8g9" Feb 18 01:17:41 crc kubenswrapper[4791]: I0218 01:17:41.863457 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2g4lz" event={"ID":"f90a5f9e-114f-4b1c-83fb-698657f3845a","Type":"ContainerStarted","Data":"f2f4bfdd82d73001bdb4c39e5925ce965dcf69c5118a85f12527b8441d81532d"} Feb 18 01:17:41 crc kubenswrapper[4791]: I0218 01:17:41.888059 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-2g4lz" podStartSLOduration=2.366883745 podStartE2EDuration="10.888043759s" podCreationTimestamp="2026-02-18 01:17:31 +0000 UTC" firstStartedPulling="2026-02-18 01:17:32.739910415 +0000 UTC m=+2594.307923585" lastFinishedPulling="2026-02-18 01:17:41.261070409 +0000 UTC m=+2602.829083599" observedRunningTime="2026-02-18 01:17:41.881102525 +0000 UTC m=+2603.449115695" watchObservedRunningTime="2026-02-18 01:17:41.888043759 +0000 UTC m=+2603.456056929" Feb 18 01:17:42 crc kubenswrapper[4791]: I0218 01:17:42.330502 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-pj8g9" podUID="87c6b391-56e6-4758-bbd1-91d05dca1478" containerName="registry-server" probeResult="failure" output=< Feb 18 01:17:42 crc kubenswrapper[4791]: timeout: failed to connect service ":50051" within 1s Feb 18 01:17:42 crc kubenswrapper[4791]: > Feb 18 01:17:47 crc kubenswrapper[4791]: I0218 01:17:47.062145 4791 scope.go:117] "RemoveContainer" containerID="ab05f5302892c0c3c036416d2782c56649d1db2dfbd76a18bbb4188e41a054e1" Feb 18 01:17:47 crc kubenswrapper[4791]: E0218 01:17:47.062947 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:17:50 crc kubenswrapper[4791]: E0218 01:17:50.066544 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:17:50 crc kubenswrapper[4791]: E0218 01:17:50.066551 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:17:51 crc kubenswrapper[4791]: I0218 01:17:51.305561 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-pj8g9" Feb 18 01:17:51 crc kubenswrapper[4791]: I0218 01:17:51.355678 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-pj8g9" Feb 18 01:17:51 crc kubenswrapper[4791]: I0218 01:17:51.477174 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-2g4lz" Feb 18 01:17:51 crc kubenswrapper[4791]: I0218 01:17:51.477217 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-2g4lz" Feb 18 01:17:51 crc kubenswrapper[4791]: I0218 01:17:51.525827 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-2g4lz" Feb 18 01:17:51 crc kubenswrapper[4791]: I0218 01:17:51.544505 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pj8g9"] Feb 18 01:17:52 crc kubenswrapper[4791]: I0218 01:17:52.046289 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-2g4lz" Feb 18 01:17:52 crc kubenswrapper[4791]: I0218 01:17:52.997255 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-pj8g9" podUID="87c6b391-56e6-4758-bbd1-91d05dca1478" containerName="registry-server" containerID="cri-o://5a7fb46afd2a426a40bc4038f86714c46dfd4e7828d3f3e0b3e899effcb48314" gracePeriod=2 Feb 18 01:17:53 crc kubenswrapper[4791]: I0218 01:17:53.545116 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-pj8g9" Feb 18 01:17:53 crc kubenswrapper[4791]: I0218 01:17:53.598699 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2g4lz"] Feb 18 01:17:53 crc kubenswrapper[4791]: I0218 01:17:53.621195 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g568l\" (UniqueName: \"kubernetes.io/projected/87c6b391-56e6-4758-bbd1-91d05dca1478-kube-api-access-g568l\") pod \"87c6b391-56e6-4758-bbd1-91d05dca1478\" (UID: \"87c6b391-56e6-4758-bbd1-91d05dca1478\") " Feb 18 01:17:53 crc kubenswrapper[4791]: I0218 01:17:53.621297 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87c6b391-56e6-4758-bbd1-91d05dca1478-catalog-content\") pod \"87c6b391-56e6-4758-bbd1-91d05dca1478\" (UID: \"87c6b391-56e6-4758-bbd1-91d05dca1478\") " Feb 18 01:17:53 crc kubenswrapper[4791]: I0218 01:17:53.621360 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87c6b391-56e6-4758-bbd1-91d05dca1478-utilities\") pod \"87c6b391-56e6-4758-bbd1-91d05dca1478\" (UID: \"87c6b391-56e6-4758-bbd1-91d05dca1478\") " Feb 18 01:17:53 crc kubenswrapper[4791]: I0218 01:17:53.637428 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/87c6b391-56e6-4758-bbd1-91d05dca1478-utilities" (OuterVolumeSpecName: "utilities") pod "87c6b391-56e6-4758-bbd1-91d05dca1478" (UID: "87c6b391-56e6-4758-bbd1-91d05dca1478"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:17:53 crc kubenswrapper[4791]: I0218 01:17:53.651355 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87c6b391-56e6-4758-bbd1-91d05dca1478-kube-api-access-g568l" (OuterVolumeSpecName: "kube-api-access-g568l") pod "87c6b391-56e6-4758-bbd1-91d05dca1478" (UID: "87c6b391-56e6-4758-bbd1-91d05dca1478"). InnerVolumeSpecName "kube-api-access-g568l". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:17:53 crc kubenswrapper[4791]: I0218 01:17:53.680956 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/87c6b391-56e6-4758-bbd1-91d05dca1478-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "87c6b391-56e6-4758-bbd1-91d05dca1478" (UID: "87c6b391-56e6-4758-bbd1-91d05dca1478"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:17:53 crc kubenswrapper[4791]: I0218 01:17:53.724722 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g568l\" (UniqueName: \"kubernetes.io/projected/87c6b391-56e6-4758-bbd1-91d05dca1478-kube-api-access-g568l\") on node \"crc\" DevicePath \"\"" Feb 18 01:17:53 crc kubenswrapper[4791]: I0218 01:17:53.725095 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87c6b391-56e6-4758-bbd1-91d05dca1478-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 01:17:53 crc kubenswrapper[4791]: I0218 01:17:53.725111 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87c6b391-56e6-4758-bbd1-91d05dca1478-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 01:17:53 crc kubenswrapper[4791]: I0218 01:17:53.949566 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vsm7z"] Feb 18 01:17:53 crc kubenswrapper[4791]: I0218 01:17:53.949836 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-vsm7z" podUID="d3f35587-5ab1-46ba-895a-58f199304ab4" containerName="registry-server" containerID="cri-o://34e18a1ac2fc28be1fbe59aa7882149f6b9928774520c8e1b68e2d13b3dc602c" gracePeriod=2 Feb 18 01:17:54 crc kubenswrapper[4791]: I0218 01:17:54.034126 4791 generic.go:334] "Generic (PLEG): container finished" podID="87c6b391-56e6-4758-bbd1-91d05dca1478" containerID="5a7fb46afd2a426a40bc4038f86714c46dfd4e7828d3f3e0b3e899effcb48314" exitCode=0 Feb 18 01:17:54 crc kubenswrapper[4791]: I0218 01:17:54.037398 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-pj8g9" Feb 18 01:17:54 crc kubenswrapper[4791]: I0218 01:17:54.038690 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pj8g9" event={"ID":"87c6b391-56e6-4758-bbd1-91d05dca1478","Type":"ContainerDied","Data":"5a7fb46afd2a426a40bc4038f86714c46dfd4e7828d3f3e0b3e899effcb48314"} Feb 18 01:17:54 crc kubenswrapper[4791]: I0218 01:17:54.038891 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pj8g9" event={"ID":"87c6b391-56e6-4758-bbd1-91d05dca1478","Type":"ContainerDied","Data":"9b7bde51286349739b82b22084cc539fe4be5c9afeba7e14984e09fa56cc1381"} Feb 18 01:17:54 crc kubenswrapper[4791]: I0218 01:17:54.039034 4791 scope.go:117] "RemoveContainer" containerID="5a7fb46afd2a426a40bc4038f86714c46dfd4e7828d3f3e0b3e899effcb48314" Feb 18 01:17:54 crc kubenswrapper[4791]: I0218 01:17:54.158010 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pj8g9"] Feb 18 01:17:54 crc kubenswrapper[4791]: I0218 01:17:54.158286 4791 scope.go:117] "RemoveContainer" containerID="e4a6a4af5eb6664fb05ea5b32f9f2d1192a982a19e1c9c885567522e532cdecd" Feb 18 01:17:54 crc kubenswrapper[4791]: I0218 01:17:54.169066 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-pj8g9"] Feb 18 01:17:54 crc kubenswrapper[4791]: I0218 01:17:54.216955 4791 scope.go:117] "RemoveContainer" containerID="b88be4a1abfa014be798e361c697a99713720f30934ce176e286e65d83ee9b13" Feb 18 01:17:54 crc kubenswrapper[4791]: I0218 01:17:54.252265 4791 scope.go:117] "RemoveContainer" containerID="5a7fb46afd2a426a40bc4038f86714c46dfd4e7828d3f3e0b3e899effcb48314" Feb 18 01:17:54 crc kubenswrapper[4791]: E0218 01:17:54.254168 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5a7fb46afd2a426a40bc4038f86714c46dfd4e7828d3f3e0b3e899effcb48314\": container with ID starting with 5a7fb46afd2a426a40bc4038f86714c46dfd4e7828d3f3e0b3e899effcb48314 not found: ID does not exist" containerID="5a7fb46afd2a426a40bc4038f86714c46dfd4e7828d3f3e0b3e899effcb48314" Feb 18 01:17:54 crc kubenswrapper[4791]: I0218 01:17:54.254211 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a7fb46afd2a426a40bc4038f86714c46dfd4e7828d3f3e0b3e899effcb48314"} err="failed to get container status \"5a7fb46afd2a426a40bc4038f86714c46dfd4e7828d3f3e0b3e899effcb48314\": rpc error: code = NotFound desc = could not find container \"5a7fb46afd2a426a40bc4038f86714c46dfd4e7828d3f3e0b3e899effcb48314\": container with ID starting with 5a7fb46afd2a426a40bc4038f86714c46dfd4e7828d3f3e0b3e899effcb48314 not found: ID does not exist" Feb 18 01:17:54 crc kubenswrapper[4791]: I0218 01:17:54.254240 4791 scope.go:117] "RemoveContainer" containerID="e4a6a4af5eb6664fb05ea5b32f9f2d1192a982a19e1c9c885567522e532cdecd" Feb 18 01:17:54 crc kubenswrapper[4791]: E0218 01:17:54.259561 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e4a6a4af5eb6664fb05ea5b32f9f2d1192a982a19e1c9c885567522e532cdecd\": container with ID starting with e4a6a4af5eb6664fb05ea5b32f9f2d1192a982a19e1c9c885567522e532cdecd not found: ID does not exist" containerID="e4a6a4af5eb6664fb05ea5b32f9f2d1192a982a19e1c9c885567522e532cdecd" Feb 18 01:17:54 crc kubenswrapper[4791]: I0218 01:17:54.259603 4791 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4a6a4af5eb6664fb05ea5b32f9f2d1192a982a19e1c9c885567522e532cdecd"} err="failed to get container status \"e4a6a4af5eb6664fb05ea5b32f9f2d1192a982a19e1c9c885567522e532cdecd\": rpc error: code = NotFound desc = could not find container \"e4a6a4af5eb6664fb05ea5b32f9f2d1192a982a19e1c9c885567522e532cdecd\": container with ID starting with e4a6a4af5eb6664fb05ea5b32f9f2d1192a982a19e1c9c885567522e532cdecd not found: ID does not exist" Feb 18 01:17:54 crc kubenswrapper[4791]: I0218 01:17:54.259631 4791 scope.go:117] "RemoveContainer" containerID="b88be4a1abfa014be798e361c697a99713720f30934ce176e286e65d83ee9b13" Feb 18 01:17:54 crc kubenswrapper[4791]: E0218 01:17:54.259967 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b88be4a1abfa014be798e361c697a99713720f30934ce176e286e65d83ee9b13\": container with ID starting with b88be4a1abfa014be798e361c697a99713720f30934ce176e286e65d83ee9b13 not found: ID does not exist" containerID="b88be4a1abfa014be798e361c697a99713720f30934ce176e286e65d83ee9b13" Feb 18 01:17:54 crc kubenswrapper[4791]: I0218 01:17:54.259987 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b88be4a1abfa014be798e361c697a99713720f30934ce176e286e65d83ee9b13"} err="failed to get container status \"b88be4a1abfa014be798e361c697a99713720f30934ce176e286e65d83ee9b13\": rpc error: code = NotFound desc = could not find container \"b88be4a1abfa014be798e361c697a99713720f30934ce176e286e65d83ee9b13\": container with ID starting with b88be4a1abfa014be798e361c697a99713720f30934ce176e286e65d83ee9b13 not found: ID does not exist" Feb 18 01:17:54 crc kubenswrapper[4791]: I0218 01:17:54.597429 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vsm7z" Feb 18 01:17:54 crc kubenswrapper[4791]: I0218 01:17:54.679513 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zz96n\" (UniqueName: \"kubernetes.io/projected/d3f35587-5ab1-46ba-895a-58f199304ab4-kube-api-access-zz96n\") pod \"d3f35587-5ab1-46ba-895a-58f199304ab4\" (UID: \"d3f35587-5ab1-46ba-895a-58f199304ab4\") " Feb 18 01:17:54 crc kubenswrapper[4791]: I0218 01:17:54.679689 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3f35587-5ab1-46ba-895a-58f199304ab4-utilities\") pod \"d3f35587-5ab1-46ba-895a-58f199304ab4\" (UID: \"d3f35587-5ab1-46ba-895a-58f199304ab4\") " Feb 18 01:17:54 crc kubenswrapper[4791]: I0218 01:17:54.679749 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3f35587-5ab1-46ba-895a-58f199304ab4-catalog-content\") pod \"d3f35587-5ab1-46ba-895a-58f199304ab4\" (UID: \"d3f35587-5ab1-46ba-895a-58f199304ab4\") " Feb 18 01:17:54 crc kubenswrapper[4791]: I0218 01:17:54.684184 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d3f35587-5ab1-46ba-895a-58f199304ab4-utilities" (OuterVolumeSpecName: "utilities") pod "d3f35587-5ab1-46ba-895a-58f199304ab4" (UID: "d3f35587-5ab1-46ba-895a-58f199304ab4"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:17:54 crc kubenswrapper[4791]: I0218 01:17:54.694625 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3f35587-5ab1-46ba-895a-58f199304ab4-kube-api-access-zz96n" (OuterVolumeSpecName: "kube-api-access-zz96n") pod "d3f35587-5ab1-46ba-895a-58f199304ab4" (UID: "d3f35587-5ab1-46ba-895a-58f199304ab4"). InnerVolumeSpecName "kube-api-access-zz96n". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:17:54 crc kubenswrapper[4791]: I0218 01:17:54.774847 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d3f35587-5ab1-46ba-895a-58f199304ab4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d3f35587-5ab1-46ba-895a-58f199304ab4" (UID: "d3f35587-5ab1-46ba-895a-58f199304ab4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:17:54 crc kubenswrapper[4791]: I0218 01:17:54.782994 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zz96n\" (UniqueName: \"kubernetes.io/projected/d3f35587-5ab1-46ba-895a-58f199304ab4-kube-api-access-zz96n\") on node \"crc\" DevicePath \"\"" Feb 18 01:17:54 crc kubenswrapper[4791]: I0218 01:17:54.783033 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3f35587-5ab1-46ba-895a-58f199304ab4-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 01:17:54 crc kubenswrapper[4791]: I0218 01:17:54.783046 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3f35587-5ab1-46ba-895a-58f199304ab4-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 01:17:55 crc kubenswrapper[4791]: I0218 01:17:55.048980 4791 generic.go:334] "Generic (PLEG): container finished" podID="d3f35587-5ab1-46ba-895a-58f199304ab4" containerID="34e18a1ac2fc28be1fbe59aa7882149f6b9928774520c8e1b68e2d13b3dc602c" exitCode=0 Feb 18 01:17:55 crc kubenswrapper[4791]: I0218 01:17:55.049054 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-vsm7z" Feb 18 01:17:55 crc kubenswrapper[4791]: I0218 01:17:55.049089 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vsm7z" event={"ID":"d3f35587-5ab1-46ba-895a-58f199304ab4","Type":"ContainerDied","Data":"34e18a1ac2fc28be1fbe59aa7882149f6b9928774520c8e1b68e2d13b3dc602c"} Feb 18 01:17:55 crc kubenswrapper[4791]: I0218 01:17:55.049134 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vsm7z" event={"ID":"d3f35587-5ab1-46ba-895a-58f199304ab4","Type":"ContainerDied","Data":"4879bca979ad3f0cb62966821bee72c11fc48c5c22321d3e8b0b7eaadf030f2c"} Feb 18 01:17:55 crc kubenswrapper[4791]: I0218 01:17:55.049152 4791 scope.go:117] "RemoveContainer" containerID="34e18a1ac2fc28be1fbe59aa7882149f6b9928774520c8e1b68e2d13b3dc602c" Feb 18 01:17:55 crc kubenswrapper[4791]: I0218 01:17:55.108738 4791 scope.go:117] "RemoveContainer" containerID="a1df5982e9c78a0a9551d9a21d3d688dd3084d4b626befc456bb2ba99c902a8f" Feb 18 01:17:55 crc kubenswrapper[4791]: I0218 01:17:55.116536 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87c6b391-56e6-4758-bbd1-91d05dca1478" path="/var/lib/kubelet/pods/87c6b391-56e6-4758-bbd1-91d05dca1478/volumes" Feb 18 01:17:55 crc kubenswrapper[4791]: I0218 01:17:55.119347 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vsm7z"] Feb 18 01:17:55 crc kubenswrapper[4791]: I0218 01:17:55.124403 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-vsm7z"] Feb 18 01:17:55 crc kubenswrapper[4791]: I0218 01:17:55.152476 4791 scope.go:117] "RemoveContainer" containerID="a88ceb4b40b58bcbeb42d3e1547d59e7bf2aeb87868c0282b3658984e63e2def" Feb 18 01:17:55 crc kubenswrapper[4791]: I0218 01:17:55.177315 4791 scope.go:117] "RemoveContainer" containerID="34e18a1ac2fc28be1fbe59aa7882149f6b9928774520c8e1b68e2d13b3dc602c" Feb 18 01:17:55 crc kubenswrapper[4791]: E0218 01:17:55.178680 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"34e18a1ac2fc28be1fbe59aa7882149f6b9928774520c8e1b68e2d13b3dc602c\": container with ID starting with 34e18a1ac2fc28be1fbe59aa7882149f6b9928774520c8e1b68e2d13b3dc602c not found: ID does not exist" containerID="34e18a1ac2fc28be1fbe59aa7882149f6b9928774520c8e1b68e2d13b3dc602c" Feb 18 01:17:55 crc kubenswrapper[4791]: I0218 01:17:55.178715 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"34e18a1ac2fc28be1fbe59aa7882149f6b9928774520c8e1b68e2d13b3dc602c"} err="failed to get container status \"34e18a1ac2fc28be1fbe59aa7882149f6b9928774520c8e1b68e2d13b3dc602c\": rpc error: code = NotFound desc = could not find container \"34e18a1ac2fc28be1fbe59aa7882149f6b9928774520c8e1b68e2d13b3dc602c\": container with ID starting with 34e18a1ac2fc28be1fbe59aa7882149f6b9928774520c8e1b68e2d13b3dc602c not found: ID does not exist" Feb 18 01:17:55 crc kubenswrapper[4791]: I0218 01:17:55.178740 4791 scope.go:117] "RemoveContainer" containerID="a1df5982e9c78a0a9551d9a21d3d688dd3084d4b626befc456bb2ba99c902a8f" Feb 18 01:17:55 crc kubenswrapper[4791]: E0218 01:17:55.179025 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a1df5982e9c78a0a9551d9a21d3d688dd3084d4b626befc456bb2ba99c902a8f\": container with ID 
starting with a1df5982e9c78a0a9551d9a21d3d688dd3084d4b626befc456bb2ba99c902a8f not found: ID does not exist" containerID="a1df5982e9c78a0a9551d9a21d3d688dd3084d4b626befc456bb2ba99c902a8f" Feb 18 01:17:55 crc kubenswrapper[4791]: I0218 01:17:55.179045 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a1df5982e9c78a0a9551d9a21d3d688dd3084d4b626befc456bb2ba99c902a8f"} err="failed to get container status \"a1df5982e9c78a0a9551d9a21d3d688dd3084d4b626befc456bb2ba99c902a8f\": rpc error: code = NotFound desc = could not find container \"a1df5982e9c78a0a9551d9a21d3d688dd3084d4b626befc456bb2ba99c902a8f\": container with ID starting with a1df5982e9c78a0a9551d9a21d3d688dd3084d4b626befc456bb2ba99c902a8f not found: ID does not exist" Feb 18 01:17:55 crc kubenswrapper[4791]: I0218 01:17:55.179057 4791 scope.go:117] "RemoveContainer" containerID="a88ceb4b40b58bcbeb42d3e1547d59e7bf2aeb87868c0282b3658984e63e2def" Feb 18 01:17:55 crc kubenswrapper[4791]: E0218 01:17:55.179301 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a88ceb4b40b58bcbeb42d3e1547d59e7bf2aeb87868c0282b3658984e63e2def\": container with ID starting with a88ceb4b40b58bcbeb42d3e1547d59e7bf2aeb87868c0282b3658984e63e2def not found: ID does not exist" containerID="a88ceb4b40b58bcbeb42d3e1547d59e7bf2aeb87868c0282b3658984e63e2def" Feb 18 01:17:55 crc kubenswrapper[4791]: I0218 01:17:55.179321 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a88ceb4b40b58bcbeb42d3e1547d59e7bf2aeb87868c0282b3658984e63e2def"} err="failed to get container status \"a88ceb4b40b58bcbeb42d3e1547d59e7bf2aeb87868c0282b3658984e63e2def\": rpc error: code = NotFound desc = could not find container \"a88ceb4b40b58bcbeb42d3e1547d59e7bf2aeb87868c0282b3658984e63e2def\": container with ID starting with a88ceb4b40b58bcbeb42d3e1547d59e7bf2aeb87868c0282b3658984e63e2def not found: ID does not exist" Feb 18 01:17:57 crc kubenswrapper[4791]: I0218 01:17:57.073443 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d3f35587-5ab1-46ba-895a-58f199304ab4" path="/var/lib/kubelet/pods/d3f35587-5ab1-46ba-895a-58f199304ab4/volumes" Feb 18 01:17:59 crc kubenswrapper[4791]: I0218 01:17:59.068304 4791 scope.go:117] "RemoveContainer" containerID="ab05f5302892c0c3c036416d2782c56649d1db2dfbd76a18bbb4188e41a054e1" Feb 18 01:17:59 crc kubenswrapper[4791]: E0218 01:17:59.069025 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:18:03 crc kubenswrapper[4791]: E0218 01:18:03.073145 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:18:03 crc kubenswrapper[4791]: E0218 01:18:03.076517 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: 
\"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:18:13 crc kubenswrapper[4791]: I0218 01:18:13.061873 4791 scope.go:117] "RemoveContainer" containerID="ab05f5302892c0c3c036416d2782c56649d1db2dfbd76a18bbb4188e41a054e1" Feb 18 01:18:13 crc kubenswrapper[4791]: E0218 01:18:13.062640 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:18:14 crc kubenswrapper[4791]: E0218 01:18:14.063559 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:18:18 crc kubenswrapper[4791]: E0218 01:18:18.066189 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:18:25 crc kubenswrapper[4791]: I0218 01:18:25.063146 4791 scope.go:117] "RemoveContainer" containerID="ab05f5302892c0c3c036416d2782c56649d1db2dfbd76a18bbb4188e41a054e1" Feb 18 01:18:25 crc kubenswrapper[4791]: E0218 01:18:25.065211 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:18:28 crc kubenswrapper[4791]: E0218 01:18:28.063553 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:18:31 crc kubenswrapper[4791]: E0218 01:18:31.063730 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:18:34 crc kubenswrapper[4791]: I0218 01:18:34.711071 4791 generic.go:334] "Generic (PLEG): container finished" podID="80cb92f6-d3a1-44dc-96de-ce408815087a" containerID="b0c2b2b054dab52febe1671d24a91e5a193e26b86cf446d77aa49dcd3ab0bd4d" exitCode=2 Feb 18 01:18:34 crc kubenswrapper[4791]: I0218 01:18:34.711484 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-jgpvr" event={"ID":"80cb92f6-d3a1-44dc-96de-ce408815087a","Type":"ContainerDied","Data":"b0c2b2b054dab52febe1671d24a91e5a193e26b86cf446d77aa49dcd3ab0bd4d"} Feb 18 01:18:36 crc kubenswrapper[4791]: I0218 01:18:36.251846 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-jgpvr" Feb 18 01:18:36 crc kubenswrapper[4791]: I0218 01:18:36.417110 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/80cb92f6-d3a1-44dc-96de-ce408815087a-ssh-key-openstack-edpm-ipam\") pod \"80cb92f6-d3a1-44dc-96de-ce408815087a\" (UID: \"80cb92f6-d3a1-44dc-96de-ce408815087a\") " Feb 18 01:18:36 crc kubenswrapper[4791]: I0218 01:18:36.417233 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c5hg5\" (UniqueName: \"kubernetes.io/projected/80cb92f6-d3a1-44dc-96de-ce408815087a-kube-api-access-c5hg5\") pod \"80cb92f6-d3a1-44dc-96de-ce408815087a\" (UID: \"80cb92f6-d3a1-44dc-96de-ce408815087a\") " Feb 18 01:18:36 crc kubenswrapper[4791]: I0218 01:18:36.417475 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/80cb92f6-d3a1-44dc-96de-ce408815087a-inventory\") pod \"80cb92f6-d3a1-44dc-96de-ce408815087a\" (UID: \"80cb92f6-d3a1-44dc-96de-ce408815087a\") " Feb 18 01:18:36 crc kubenswrapper[4791]: I0218 01:18:36.422645 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/80cb92f6-d3a1-44dc-96de-ce408815087a-kube-api-access-c5hg5" (OuterVolumeSpecName: "kube-api-access-c5hg5") pod "80cb92f6-d3a1-44dc-96de-ce408815087a" (UID: "80cb92f6-d3a1-44dc-96de-ce408815087a"). InnerVolumeSpecName "kube-api-access-c5hg5". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:18:36 crc kubenswrapper[4791]: I0218 01:18:36.446842 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/80cb92f6-d3a1-44dc-96de-ce408815087a-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "80cb92f6-d3a1-44dc-96de-ce408815087a" (UID: "80cb92f6-d3a1-44dc-96de-ce408815087a"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:18:36 crc kubenswrapper[4791]: I0218 01:18:36.456253 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/80cb92f6-d3a1-44dc-96de-ce408815087a-inventory" (OuterVolumeSpecName: "inventory") pod "80cb92f6-d3a1-44dc-96de-ce408815087a" (UID: "80cb92f6-d3a1-44dc-96de-ce408815087a"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:18:36 crc kubenswrapper[4791]: I0218 01:18:36.519835 4791 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/80cb92f6-d3a1-44dc-96de-ce408815087a-inventory\") on node \"crc\" DevicePath \"\"" Feb 18 01:18:36 crc kubenswrapper[4791]: I0218 01:18:36.519979 4791 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/80cb92f6-d3a1-44dc-96de-ce408815087a-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Feb 18 01:18:36 crc kubenswrapper[4791]: I0218 01:18:36.520040 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c5hg5\" (UniqueName: \"kubernetes.io/projected/80cb92f6-d3a1-44dc-96de-ce408815087a-kube-api-access-c5hg5\") on node \"crc\" DevicePath \"\"" Feb 18 01:18:36 crc kubenswrapper[4791]: I0218 01:18:36.729577 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-jgpvr" event={"ID":"80cb92f6-d3a1-44dc-96de-ce408815087a","Type":"ContainerDied","Data":"3c82d5719234644a5e64538768a41250a7a3722214b398740c70b1bdae558b94"} Feb 18 01:18:36 crc kubenswrapper[4791]: I0218 01:18:36.729626 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3c82d5719234644a5e64538768a41250a7a3722214b398740c70b1bdae558b94" Feb 18 01:18:36 crc kubenswrapper[4791]: I0218 01:18:36.729630 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-jgpvr" Feb 18 01:18:38 crc kubenswrapper[4791]: I0218 01:18:38.062439 4791 scope.go:117] "RemoveContainer" containerID="ab05f5302892c0c3c036416d2782c56649d1db2dfbd76a18bbb4188e41a054e1" Feb 18 01:18:38 crc kubenswrapper[4791]: E0218 01:18:38.063433 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:18:42 crc kubenswrapper[4791]: E0218 01:18:42.063370 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:18:45 crc kubenswrapper[4791]: E0218 01:18:45.064563 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:18:53 crc kubenswrapper[4791]: I0218 01:18:53.062049 4791 scope.go:117] "RemoveContainer" containerID="ab05f5302892c0c3c036416d2782c56649d1db2dfbd76a18bbb4188e41a054e1" Feb 18 01:18:53 crc kubenswrapper[4791]: E0218 01:18:53.063226 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:18:54 crc kubenswrapper[4791]: E0218 01:18:54.065016 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:18:54 crc kubenswrapper[4791]: I0218 01:18:54.068828 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gpsbw"] Feb 18 01:18:54 crc kubenswrapper[4791]: E0218 01:18:54.069960 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87c6b391-56e6-4758-bbd1-91d05dca1478" containerName="extract-utilities" Feb 18 01:18:54 crc kubenswrapper[4791]: I0218 01:18:54.070013 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="87c6b391-56e6-4758-bbd1-91d05dca1478" containerName="extract-utilities" Feb 18 01:18:54 crc kubenswrapper[4791]: E0218 01:18:54.070070 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3f35587-5ab1-46ba-895a-58f199304ab4" containerName="extract-utilities" Feb 18 01:18:54 crc kubenswrapper[4791]: I0218 01:18:54.070090 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3f35587-5ab1-46ba-895a-58f199304ab4" containerName="extract-utilities" Feb 18 01:18:54 crc kubenswrapper[4791]: E0218 01:18:54.070138 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3f35587-5ab1-46ba-895a-58f199304ab4" containerName="registry-server" Feb 18 01:18:54 crc kubenswrapper[4791]: I0218 01:18:54.070197 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3f35587-5ab1-46ba-895a-58f199304ab4" containerName="registry-server" Feb 18 01:18:54 crc kubenswrapper[4791]: E0218 01:18:54.070272 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87c6b391-56e6-4758-bbd1-91d05dca1478" containerName="extract-content" Feb 18 01:18:54 crc kubenswrapper[4791]: I0218 01:18:54.070294 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="87c6b391-56e6-4758-bbd1-91d05dca1478" containerName="extract-content" Feb 18 01:18:54 crc kubenswrapper[4791]: E0218 01:18:54.070336 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87c6b391-56e6-4758-bbd1-91d05dca1478" containerName="registry-server" Feb 18 01:18:54 crc kubenswrapper[4791]: I0218 01:18:54.070353 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="87c6b391-56e6-4758-bbd1-91d05dca1478" containerName="registry-server" Feb 18 01:18:54 crc kubenswrapper[4791]: E0218 01:18:54.070394 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3f35587-5ab1-46ba-895a-58f199304ab4" containerName="extract-content" Feb 18 01:18:54 crc kubenswrapper[4791]: I0218 01:18:54.070413 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3f35587-5ab1-46ba-895a-58f199304ab4" containerName="extract-content" Feb 18 01:18:54 crc kubenswrapper[4791]: E0218 01:18:54.070437 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80cb92f6-d3a1-44dc-96de-ce408815087a" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Feb 18 01:18:54 crc kubenswrapper[4791]: I0218 01:18:54.070457 4791 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="80cb92f6-d3a1-44dc-96de-ce408815087a" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Feb 18 01:18:54 crc kubenswrapper[4791]: I0218 01:18:54.071025 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="80cb92f6-d3a1-44dc-96de-ce408815087a" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Feb 18 01:18:54 crc kubenswrapper[4791]: I0218 01:18:54.071119 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="87c6b391-56e6-4758-bbd1-91d05dca1478" containerName="registry-server" Feb 18 01:18:54 crc kubenswrapper[4791]: I0218 01:18:54.071196 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3f35587-5ab1-46ba-895a-58f199304ab4" containerName="registry-server" Feb 18 01:18:54 crc kubenswrapper[4791]: I0218 01:18:54.073097 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gpsbw" Feb 18 01:18:54 crc kubenswrapper[4791]: I0218 01:18:54.078905 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Feb 18 01:18:54 crc kubenswrapper[4791]: I0218 01:18:54.079540 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Feb 18 01:18:54 crc kubenswrapper[4791]: I0218 01:18:54.078995 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Feb 18 01:18:54 crc kubenswrapper[4791]: I0218 01:18:54.079092 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-qjdk4" Feb 18 01:18:54 crc kubenswrapper[4791]: I0218 01:18:54.110578 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gpsbw"] Feb 18 01:18:54 crc kubenswrapper[4791]: I0218 01:18:54.186256 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kvb8t\" (UniqueName: \"kubernetes.io/projected/43c4470a-baeb-43d6-bb3e-ff571be8c778-kube-api-access-kvb8t\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-gpsbw\" (UID: \"43c4470a-baeb-43d6-bb3e-ff571be8c778\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gpsbw" Feb 18 01:18:54 crc kubenswrapper[4791]: I0218 01:18:54.187392 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/43c4470a-baeb-43d6-bb3e-ff571be8c778-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-gpsbw\" (UID: \"43c4470a-baeb-43d6-bb3e-ff571be8c778\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gpsbw" Feb 18 01:18:54 crc kubenswrapper[4791]: I0218 01:18:54.187565 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/43c4470a-baeb-43d6-bb3e-ff571be8c778-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-gpsbw\" (UID: \"43c4470a-baeb-43d6-bb3e-ff571be8c778\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gpsbw" Feb 18 01:18:54 crc kubenswrapper[4791]: I0218 01:18:54.289872 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kvb8t\" (UniqueName: 
\"kubernetes.io/projected/43c4470a-baeb-43d6-bb3e-ff571be8c778-kube-api-access-kvb8t\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-gpsbw\" (UID: \"43c4470a-baeb-43d6-bb3e-ff571be8c778\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gpsbw" Feb 18 01:18:54 crc kubenswrapper[4791]: I0218 01:18:54.289974 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/43c4470a-baeb-43d6-bb3e-ff571be8c778-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-gpsbw\" (UID: \"43c4470a-baeb-43d6-bb3e-ff571be8c778\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gpsbw" Feb 18 01:18:54 crc kubenswrapper[4791]: I0218 01:18:54.290027 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/43c4470a-baeb-43d6-bb3e-ff571be8c778-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-gpsbw\" (UID: \"43c4470a-baeb-43d6-bb3e-ff571be8c778\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gpsbw" Feb 18 01:18:54 crc kubenswrapper[4791]: I0218 01:18:54.296554 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/43c4470a-baeb-43d6-bb3e-ff571be8c778-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-gpsbw\" (UID: \"43c4470a-baeb-43d6-bb3e-ff571be8c778\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gpsbw" Feb 18 01:18:54 crc kubenswrapper[4791]: I0218 01:18:54.304626 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/43c4470a-baeb-43d6-bb3e-ff571be8c778-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-gpsbw\" (UID: \"43c4470a-baeb-43d6-bb3e-ff571be8c778\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gpsbw" Feb 18 01:18:54 crc kubenswrapper[4791]: I0218 01:18:54.306370 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kvb8t\" (UniqueName: \"kubernetes.io/projected/43c4470a-baeb-43d6-bb3e-ff571be8c778-kube-api-access-kvb8t\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-gpsbw\" (UID: \"43c4470a-baeb-43d6-bb3e-ff571be8c778\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gpsbw" Feb 18 01:18:54 crc kubenswrapper[4791]: I0218 01:18:54.401280 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gpsbw" Feb 18 01:18:54 crc kubenswrapper[4791]: I0218 01:18:54.954272 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gpsbw"] Feb 18 01:18:55 crc kubenswrapper[4791]: I0218 01:18:55.963519 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gpsbw" event={"ID":"43c4470a-baeb-43d6-bb3e-ff571be8c778","Type":"ContainerStarted","Data":"1d50abf02a3334e95e61efe4f2a8e783e84f97222a754d16382adb719f84a7a4"} Feb 18 01:18:55 crc kubenswrapper[4791]: I0218 01:18:55.963960 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gpsbw" event={"ID":"43c4470a-baeb-43d6-bb3e-ff571be8c778","Type":"ContainerStarted","Data":"a663edba20fcc4594c04ce07acb076a9a4ad715ae0da159f0b353c6dee3300e3"} Feb 18 01:18:55 crc kubenswrapper[4791]: I0218 01:18:55.987502 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gpsbw" podStartSLOduration=1.469051004 podStartE2EDuration="1.987482427s" podCreationTimestamp="2026-02-18 01:18:54 +0000 UTC" firstStartedPulling="2026-02-18 01:18:54.968452036 +0000 UTC m=+2676.536465246" lastFinishedPulling="2026-02-18 01:18:55.486883499 +0000 UTC m=+2677.054896669" observedRunningTime="2026-02-18 01:18:55.983866356 +0000 UTC m=+2677.551879526" watchObservedRunningTime="2026-02-18 01:18:55.987482427 +0000 UTC m=+2677.555495597" Feb 18 01:18:57 crc kubenswrapper[4791]: E0218 01:18:57.065648 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:19:05 crc kubenswrapper[4791]: I0218 01:19:05.061477 4791 scope.go:117] "RemoveContainer" containerID="ab05f5302892c0c3c036416d2782c56649d1db2dfbd76a18bbb4188e41a054e1" Feb 18 01:19:05 crc kubenswrapper[4791]: E0218 01:19:05.062334 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:19:06 crc kubenswrapper[4791]: E0218 01:19:06.063268 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:19:12 crc kubenswrapper[4791]: E0218 01:19:12.064906 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:19:18 crc 
kubenswrapper[4791]: I0218 01:19:18.060778 4791 scope.go:117] "RemoveContainer" containerID="ab05f5302892c0c3c036416d2782c56649d1db2dfbd76a18bbb4188e41a054e1" Feb 18 01:19:18 crc kubenswrapper[4791]: E0218 01:19:18.061355 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:19:21 crc kubenswrapper[4791]: E0218 01:19:21.063330 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:19:25 crc kubenswrapper[4791]: E0218 01:19:25.064041 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:19:31 crc kubenswrapper[4791]: I0218 01:19:31.652353 4791 scope.go:117] "RemoveContainer" containerID="ab05f5302892c0c3c036416d2782c56649d1db2dfbd76a18bbb4188e41a054e1" Feb 18 01:19:31 crc kubenswrapper[4791]: E0218 01:19:31.660858 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:19:32 crc kubenswrapper[4791]: I0218 01:19:32.690759 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerStarted","Data":"96f6b6f2e77d5877ada88a2fb85237314890c1147c715c19089e59128b302914"} Feb 18 01:19:34 crc kubenswrapper[4791]: I0218 01:19:34.808374 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wwwps"] Feb 18 01:19:34 crc kubenswrapper[4791]: I0218 01:19:34.811864 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wwwps" Feb 18 01:19:34 crc kubenswrapper[4791]: I0218 01:19:34.817317 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wwwps"] Feb 18 01:19:34 crc kubenswrapper[4791]: I0218 01:19:34.866564 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5-catalog-content\") pod \"redhat-operators-wwwps\" (UID: \"36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5\") " pod="openshift-marketplace/redhat-operators-wwwps" Feb 18 01:19:34 crc kubenswrapper[4791]: I0218 01:19:34.866940 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v6286\" (UniqueName: \"kubernetes.io/projected/36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5-kube-api-access-v6286\") pod \"redhat-operators-wwwps\" (UID: \"36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5\") " pod="openshift-marketplace/redhat-operators-wwwps" Feb 18 01:19:34 crc kubenswrapper[4791]: I0218 01:19:34.867065 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5-utilities\") pod \"redhat-operators-wwwps\" (UID: \"36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5\") " pod="openshift-marketplace/redhat-operators-wwwps" Feb 18 01:19:34 crc kubenswrapper[4791]: I0218 01:19:34.969808 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5-utilities\") pod \"redhat-operators-wwwps\" (UID: \"36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5\") " pod="openshift-marketplace/redhat-operators-wwwps" Feb 18 01:19:34 crc kubenswrapper[4791]: I0218 01:19:34.969887 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5-catalog-content\") pod \"redhat-operators-wwwps\" (UID: \"36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5\") " pod="openshift-marketplace/redhat-operators-wwwps" Feb 18 01:19:34 crc kubenswrapper[4791]: I0218 01:19:34.969968 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v6286\" (UniqueName: \"kubernetes.io/projected/36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5-kube-api-access-v6286\") pod \"redhat-operators-wwwps\" (UID: \"36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5\") " pod="openshift-marketplace/redhat-operators-wwwps" Feb 18 01:19:34 crc kubenswrapper[4791]: I0218 01:19:34.970448 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5-utilities\") pod \"redhat-operators-wwwps\" (UID: \"36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5\") " pod="openshift-marketplace/redhat-operators-wwwps" Feb 18 01:19:34 crc kubenswrapper[4791]: I0218 01:19:34.970620 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5-catalog-content\") pod \"redhat-operators-wwwps\" (UID: \"36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5\") " pod="openshift-marketplace/redhat-operators-wwwps" Feb 18 01:19:34 crc kubenswrapper[4791]: I0218 01:19:34.998546 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-v6286\" (UniqueName: \"kubernetes.io/projected/36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5-kube-api-access-v6286\") pod \"redhat-operators-wwwps\" (UID: \"36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5\") " pod="openshift-marketplace/redhat-operators-wwwps" Feb 18 01:19:35 crc kubenswrapper[4791]: I0218 01:19:35.136678 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wwwps" Feb 18 01:19:35 crc kubenswrapper[4791]: I0218 01:19:35.621757 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wwwps"] Feb 18 01:19:35 crc kubenswrapper[4791]: W0218 01:19:35.631923 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod36a75ee7_4e0c_46ee_8b15_0e57ee1f06d5.slice/crio-b89404f415c9378dc8a7c75b16f1528247146af2c9115f313040a3d96e1b75c1 WatchSource:0}: Error finding container b89404f415c9378dc8a7c75b16f1528247146af2c9115f313040a3d96e1b75c1: Status 404 returned error can't find the container with id b89404f415c9378dc8a7c75b16f1528247146af2c9115f313040a3d96e1b75c1 Feb 18 01:19:35 crc kubenswrapper[4791]: I0218 01:19:35.729061 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wwwps" event={"ID":"36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5","Type":"ContainerStarted","Data":"b89404f415c9378dc8a7c75b16f1528247146af2c9115f313040a3d96e1b75c1"} Feb 18 01:19:36 crc kubenswrapper[4791]: I0218 01:19:36.740596 4791 generic.go:334] "Generic (PLEG): container finished" podID="36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5" containerID="879971771ad4709e556c95ef3308ad2fa8a703d6cd3af68396d9d437eac23dee" exitCode=0 Feb 18 01:19:36 crc kubenswrapper[4791]: I0218 01:19:36.740647 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wwwps" event={"ID":"36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5","Type":"ContainerDied","Data":"879971771ad4709e556c95ef3308ad2fa8a703d6cd3af68396d9d437eac23dee"} Feb 18 01:19:38 crc kubenswrapper[4791]: E0218 01:19:38.062947 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:19:38 crc kubenswrapper[4791]: I0218 01:19:38.760709 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wwwps" event={"ID":"36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5","Type":"ContainerStarted","Data":"c85cb9625395ba9577b58cc4de4926ff3697dd1f945a104f7df44cce9ed99607"} Feb 18 01:19:42 crc kubenswrapper[4791]: I0218 01:19:42.801979 4791 generic.go:334] "Generic (PLEG): container finished" podID="36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5" containerID="c85cb9625395ba9577b58cc4de4926ff3697dd1f945a104f7df44cce9ed99607" exitCode=0 Feb 18 01:19:42 crc kubenswrapper[4791]: I0218 01:19:42.802055 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wwwps" event={"ID":"36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5","Type":"ContainerDied","Data":"c85cb9625395ba9577b58cc4de4926ff3697dd1f945a104f7df44cce9ed99607"} Feb 18 01:19:43 crc kubenswrapper[4791]: I0218 01:19:43.819534 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wwwps" 
event={"ID":"36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5","Type":"ContainerStarted","Data":"99145b544b30e41879475d5ab32967a685b00711061ff8111efcef45a62a02c1"} Feb 18 01:19:43 crc kubenswrapper[4791]: I0218 01:19:43.853372 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wwwps" podStartSLOduration=3.265020759 podStartE2EDuration="9.853339153s" podCreationTimestamp="2026-02-18 01:19:34 +0000 UTC" firstStartedPulling="2026-02-18 01:19:36.742966194 +0000 UTC m=+2718.310979364" lastFinishedPulling="2026-02-18 01:19:43.331284588 +0000 UTC m=+2724.899297758" observedRunningTime="2026-02-18 01:19:43.850526817 +0000 UTC m=+2725.418539987" watchObservedRunningTime="2026-02-18 01:19:43.853339153 +0000 UTC m=+2725.421352353" Feb 18 01:19:45 crc kubenswrapper[4791]: E0218 01:19:45.063188 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:19:45 crc kubenswrapper[4791]: I0218 01:19:45.137310 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wwwps" Feb 18 01:19:45 crc kubenswrapper[4791]: I0218 01:19:45.140449 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wwwps" Feb 18 01:19:46 crc kubenswrapper[4791]: I0218 01:19:46.198713 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wwwps" podUID="36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5" containerName="registry-server" probeResult="failure" output=< Feb 18 01:19:46 crc kubenswrapper[4791]: timeout: failed to connect service ":50051" within 1s Feb 18 01:19:46 crc kubenswrapper[4791]: > Feb 18 01:19:52 crc kubenswrapper[4791]: E0218 01:19:52.063642 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:19:56 crc kubenswrapper[4791]: I0218 01:19:56.216111 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wwwps" podUID="36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5" containerName="registry-server" probeResult="failure" output=< Feb 18 01:19:56 crc kubenswrapper[4791]: timeout: failed to connect service ":50051" within 1s Feb 18 01:19:56 crc kubenswrapper[4791]: > Feb 18 01:19:58 crc kubenswrapper[4791]: E0218 01:19:58.062614 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:20:05 crc kubenswrapper[4791]: I0218 01:20:05.218705 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wwwps" Feb 18 01:20:05 crc kubenswrapper[4791]: I0218 01:20:05.291865 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-marketplace/redhat-operators-wwwps" Feb 18 01:20:06 crc kubenswrapper[4791]: I0218 01:20:06.019276 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wwwps"] Feb 18 01:20:06 crc kubenswrapper[4791]: E0218 01:20:06.064005 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:20:07 crc kubenswrapper[4791]: I0218 01:20:07.089847 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wwwps" podUID="36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5" containerName="registry-server" containerID="cri-o://99145b544b30e41879475d5ab32967a685b00711061ff8111efcef45a62a02c1" gracePeriod=2 Feb 18 01:20:07 crc kubenswrapper[4791]: I0218 01:20:07.663122 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wwwps" Feb 18 01:20:07 crc kubenswrapper[4791]: I0218 01:20:07.778644 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v6286\" (UniqueName: \"kubernetes.io/projected/36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5-kube-api-access-v6286\") pod \"36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5\" (UID: \"36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5\") " Feb 18 01:20:07 crc kubenswrapper[4791]: I0218 01:20:07.778752 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5-catalog-content\") pod \"36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5\" (UID: \"36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5\") " Feb 18 01:20:07 crc kubenswrapper[4791]: I0218 01:20:07.778784 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5-utilities\") pod \"36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5\" (UID: \"36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5\") " Feb 18 01:20:07 crc kubenswrapper[4791]: I0218 01:20:07.781954 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5-utilities" (OuterVolumeSpecName: "utilities") pod "36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5" (UID: "36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:20:07 crc kubenswrapper[4791]: I0218 01:20:07.784138 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 01:20:07 crc kubenswrapper[4791]: I0218 01:20:07.818185 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5-kube-api-access-v6286" (OuterVolumeSpecName: "kube-api-access-v6286") pod "36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5" (UID: "36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5"). InnerVolumeSpecName "kube-api-access-v6286". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:20:07 crc kubenswrapper[4791]: I0218 01:20:07.886101 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v6286\" (UniqueName: \"kubernetes.io/projected/36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5-kube-api-access-v6286\") on node \"crc\" DevicePath \"\"" Feb 18 01:20:07 crc kubenswrapper[4791]: I0218 01:20:07.911472 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5" (UID: "36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:20:07 crc kubenswrapper[4791]: I0218 01:20:07.989346 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 01:20:08 crc kubenswrapper[4791]: I0218 01:20:08.104602 4791 generic.go:334] "Generic (PLEG): container finished" podID="36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5" containerID="99145b544b30e41879475d5ab32967a685b00711061ff8111efcef45a62a02c1" exitCode=0 Feb 18 01:20:08 crc kubenswrapper[4791]: I0218 01:20:08.104657 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wwwps" event={"ID":"36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5","Type":"ContainerDied","Data":"99145b544b30e41879475d5ab32967a685b00711061ff8111efcef45a62a02c1"} Feb 18 01:20:08 crc kubenswrapper[4791]: I0218 01:20:08.104721 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wwwps" Feb 18 01:20:08 crc kubenswrapper[4791]: I0218 01:20:08.104744 4791 scope.go:117] "RemoveContainer" containerID="99145b544b30e41879475d5ab32967a685b00711061ff8111efcef45a62a02c1" Feb 18 01:20:08 crc kubenswrapper[4791]: I0218 01:20:08.104727 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wwwps" event={"ID":"36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5","Type":"ContainerDied","Data":"b89404f415c9378dc8a7c75b16f1528247146af2c9115f313040a3d96e1b75c1"} Feb 18 01:20:08 crc kubenswrapper[4791]: I0218 01:20:08.149236 4791 scope.go:117] "RemoveContainer" containerID="c85cb9625395ba9577b58cc4de4926ff3697dd1f945a104f7df44cce9ed99607" Feb 18 01:20:08 crc kubenswrapper[4791]: I0218 01:20:08.157214 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wwwps"] Feb 18 01:20:08 crc kubenswrapper[4791]: I0218 01:20:08.169355 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wwwps"] Feb 18 01:20:08 crc kubenswrapper[4791]: I0218 01:20:08.174435 4791 scope.go:117] "RemoveContainer" containerID="879971771ad4709e556c95ef3308ad2fa8a703d6cd3af68396d9d437eac23dee" Feb 18 01:20:08 crc kubenswrapper[4791]: I0218 01:20:08.235894 4791 scope.go:117] "RemoveContainer" containerID="99145b544b30e41879475d5ab32967a685b00711061ff8111efcef45a62a02c1" Feb 18 01:20:08 crc kubenswrapper[4791]: E0218 01:20:08.237504 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"99145b544b30e41879475d5ab32967a685b00711061ff8111efcef45a62a02c1\": container with ID starting with 99145b544b30e41879475d5ab32967a685b00711061ff8111efcef45a62a02c1 
not found: ID does not exist" containerID="99145b544b30e41879475d5ab32967a685b00711061ff8111efcef45a62a02c1" Feb 18 01:20:08 crc kubenswrapper[4791]: I0218 01:20:08.237584 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99145b544b30e41879475d5ab32967a685b00711061ff8111efcef45a62a02c1"} err="failed to get container status \"99145b544b30e41879475d5ab32967a685b00711061ff8111efcef45a62a02c1\": rpc error: code = NotFound desc = could not find container \"99145b544b30e41879475d5ab32967a685b00711061ff8111efcef45a62a02c1\": container with ID starting with 99145b544b30e41879475d5ab32967a685b00711061ff8111efcef45a62a02c1 not found: ID does not exist" Feb 18 01:20:08 crc kubenswrapper[4791]: I0218 01:20:08.237617 4791 scope.go:117] "RemoveContainer" containerID="c85cb9625395ba9577b58cc4de4926ff3697dd1f945a104f7df44cce9ed99607" Feb 18 01:20:08 crc kubenswrapper[4791]: E0218 01:20:08.239331 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c85cb9625395ba9577b58cc4de4926ff3697dd1f945a104f7df44cce9ed99607\": container with ID starting with c85cb9625395ba9577b58cc4de4926ff3697dd1f945a104f7df44cce9ed99607 not found: ID does not exist" containerID="c85cb9625395ba9577b58cc4de4926ff3697dd1f945a104f7df44cce9ed99607" Feb 18 01:20:08 crc kubenswrapper[4791]: I0218 01:20:08.239460 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c85cb9625395ba9577b58cc4de4926ff3697dd1f945a104f7df44cce9ed99607"} err="failed to get container status \"c85cb9625395ba9577b58cc4de4926ff3697dd1f945a104f7df44cce9ed99607\": rpc error: code = NotFound desc = could not find container \"c85cb9625395ba9577b58cc4de4926ff3697dd1f945a104f7df44cce9ed99607\": container with ID starting with c85cb9625395ba9577b58cc4de4926ff3697dd1f945a104f7df44cce9ed99607 not found: ID does not exist" Feb 18 01:20:08 crc kubenswrapper[4791]: I0218 01:20:08.239553 4791 scope.go:117] "RemoveContainer" containerID="879971771ad4709e556c95ef3308ad2fa8a703d6cd3af68396d9d437eac23dee" Feb 18 01:20:08 crc kubenswrapper[4791]: E0218 01:20:08.240057 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"879971771ad4709e556c95ef3308ad2fa8a703d6cd3af68396d9d437eac23dee\": container with ID starting with 879971771ad4709e556c95ef3308ad2fa8a703d6cd3af68396d9d437eac23dee not found: ID does not exist" containerID="879971771ad4709e556c95ef3308ad2fa8a703d6cd3af68396d9d437eac23dee" Feb 18 01:20:08 crc kubenswrapper[4791]: I0218 01:20:08.240090 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"879971771ad4709e556c95ef3308ad2fa8a703d6cd3af68396d9d437eac23dee"} err="failed to get container status \"879971771ad4709e556c95ef3308ad2fa8a703d6cd3af68396d9d437eac23dee\": rpc error: code = NotFound desc = could not find container \"879971771ad4709e556c95ef3308ad2fa8a703d6cd3af68396d9d437eac23dee\": container with ID starting with 879971771ad4709e556c95ef3308ad2fa8a703d6cd3af68396d9d437eac23dee not found: ID does not exist" Feb 18 01:20:09 crc kubenswrapper[4791]: I0218 01:20:09.075117 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5" path="/var/lib/kubelet/pods/36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5/volumes" Feb 18 01:20:13 crc kubenswrapper[4791]: E0218 01:20:13.064350 4791 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:20:19 crc kubenswrapper[4791]: E0218 01:20:19.065129 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:20:24 crc kubenswrapper[4791]: E0218 01:20:24.064391 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:20:32 crc kubenswrapper[4791]: E0218 01:20:32.280890 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:20:39 crc kubenswrapper[4791]: E0218 01:20:39.073259 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:20:43 crc kubenswrapper[4791]: E0218 01:20:43.069402 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:20:53 crc kubenswrapper[4791]: E0218 01:20:53.064380 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:20:55 crc kubenswrapper[4791]: E0218 01:20:55.063906 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:21:06 crc kubenswrapper[4791]: E0218 01:21:06.064094 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:21:07 crc kubenswrapper[4791]: E0218 
01:21:07.063194 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:21:20 crc kubenswrapper[4791]: E0218 01:21:20.064631 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:21:21 crc kubenswrapper[4791]: E0218 01:21:21.065579 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:21:32 crc kubenswrapper[4791]: E0218 01:21:32.064349 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:21:33 crc kubenswrapper[4791]: E0218 01:21:33.062841 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:21:43 crc kubenswrapper[4791]: E0218 01:21:43.064319 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:21:45 crc kubenswrapper[4791]: E0218 01:21:45.064077 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:21:56 crc kubenswrapper[4791]: I0218 01:21:56.801878 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 01:21:56 crc kubenswrapper[4791]: I0218 01:21:56.802596 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 01:21:57 crc 
kubenswrapper[4791]: E0218 01:21:57.065011 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:21:57 crc kubenswrapper[4791]: I0218 01:21:57.065332 4791 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 18 01:21:57 crc kubenswrapper[4791]: E0218 01:21:57.205013 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 01:21:57 crc kubenswrapper[4791]: E0218 01:21:57.205088 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 01:21:57 crc kubenswrapper[4791]: E0218 01:21:57.205317 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lgn7d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL 
MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-scphk_openstack(68e5e8d6-5771-4045-858a-4a39b2db99f9): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 18 01:21:57 crc kubenswrapper[4791]: E0218 01:21:57.206668 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:22:09 crc kubenswrapper[4791]: E0218 01:22:09.070601 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:22:10 crc kubenswrapper[4791]: E0218 01:22:10.062479 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:22:20 crc kubenswrapper[4791]: E0218 01:22:20.065117 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:22:23 crc kubenswrapper[4791]: E0218 01:22:23.062925 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:22:26 crc kubenswrapper[4791]: I0218 01:22:26.799810 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 01:22:26 crc kubenswrapper[4791]: I0218 01:22:26.800413 4791 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 01:22:33 crc kubenswrapper[4791]: E0218 01:22:33.072751 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:22:34 crc kubenswrapper[4791]: E0218 01:22:34.064087 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:22:46 crc kubenswrapper[4791]: E0218 01:22:46.064083 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:22:48 crc kubenswrapper[4791]: E0218 01:22:48.144477 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 01:22:48 crc kubenswrapper[4791]: E0218 01:22:48.144872 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 01:22:48 crc kubenswrapper[4791]: E0218 01:22:48.145194 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndh699h566hcch79h59h595h58bhb6h7fh594h67fh546h668h56hc8h6chd5h94h599h587hddh64bh57bhc9h68fh7dh57bh65dh64fh68dh59dq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-np7gr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(4b9cec47-aeda-40f0-b83e-46f09ce65e95): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 18 01:22:48 crc kubenswrapper[4791]: E0218 01:22:48.146577 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:22:56 crc kubenswrapper[4791]: I0218 01:22:56.799505 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 01:22:56 crc kubenswrapper[4791]: I0218 01:22:56.801376 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 01:22:56 crc kubenswrapper[4791]: I0218 01:22:56.801494 4791 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" Feb 18 01:22:56 crc kubenswrapper[4791]: I0218 01:22:56.802360 4791 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"96f6b6f2e77d5877ada88a2fb85237314890c1147c715c19089e59128b302914"} pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 18 01:22:56 crc kubenswrapper[4791]: I0218 01:22:56.802498 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" containerID="cri-o://96f6b6f2e77d5877ada88a2fb85237314890c1147c715c19089e59128b302914" gracePeriod=600 Feb 18 01:22:56 crc kubenswrapper[4791]: I0218 01:22:56.966930 4791 generic.go:334] "Generic (PLEG): container finished" podID="0b31a333-8f95-459c-8135-e91e557c4c85" containerID="96f6b6f2e77d5877ada88a2fb85237314890c1147c715c19089e59128b302914" exitCode=0 Feb 18 01:22:56 crc kubenswrapper[4791]: I0218 01:22:56.967010 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerDied","Data":"96f6b6f2e77d5877ada88a2fb85237314890c1147c715c19089e59128b302914"} Feb 18 01:22:56 crc kubenswrapper[4791]: I0218 01:22:56.967077 4791 scope.go:117] "RemoveContainer" containerID="ab05f5302892c0c3c036416d2782c56649d1db2dfbd76a18bbb4188e41a054e1" Feb 18 01:22:57 crc kubenswrapper[4791]: I0218 01:22:57.986427 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerStarted","Data":"3cd1bc8e681afde002094b7f387db47144e26b7fef30167b74d341dbe40fb0a5"} Feb 18 01:22:59 crc kubenswrapper[4791]: E0218 01:22:59.078230 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:22:59 crc kubenswrapper[4791]: E0218 01:22:59.079236 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:23:10 crc kubenswrapper[4791]: E0218 01:23:10.063680 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:23:14 crc kubenswrapper[4791]: E0218 01:23:14.065813 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:23:25 crc kubenswrapper[4791]: E0218 01:23:25.064091 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:23:28 crc kubenswrapper[4791]: E0218 01:23:28.064458 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:23:38 crc kubenswrapper[4791]: E0218 01:23:38.064667 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:23:39 crc kubenswrapper[4791]: I0218 01:23:39.167441 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-bb5sn"] Feb 18 01:23:39 crc kubenswrapper[4791]: E0218 01:23:39.171091 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5" containerName="extract-utilities" Feb 18 01:23:39 crc kubenswrapper[4791]: I0218 01:23:39.171120 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5" containerName="extract-utilities" Feb 18 01:23:39 crc kubenswrapper[4791]: E0218 01:23:39.171379 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5" containerName="extract-content" Feb 18 01:23:39 crc kubenswrapper[4791]: I0218 01:23:39.171391 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5" containerName="extract-content" Feb 18 01:23:39 crc kubenswrapper[4791]: E0218 01:23:39.171430 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5" containerName="registry-server" Feb 18 01:23:39 crc kubenswrapper[4791]: I0218 01:23:39.171438 4791 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5" containerName="registry-server" Feb 18 01:23:39 crc kubenswrapper[4791]: I0218 01:23:39.173390 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="36a75ee7-4e0c-46ee-8b15-0e57ee1f06d5" containerName="registry-server" Feb 18 01:23:39 crc kubenswrapper[4791]: I0218 01:23:39.175117 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bb5sn" Feb 18 01:23:39 crc kubenswrapper[4791]: I0218 01:23:39.185722 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bb5sn"] Feb 18 01:23:39 crc kubenswrapper[4791]: I0218 01:23:39.313924 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ec402bd-bbb5-43e4-b768-88d4bdf1eb65-catalog-content\") pod \"redhat-marketplace-bb5sn\" (UID: \"8ec402bd-bbb5-43e4-b768-88d4bdf1eb65\") " pod="openshift-marketplace/redhat-marketplace-bb5sn" Feb 18 01:23:39 crc kubenswrapper[4791]: I0218 01:23:39.314006 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ec402bd-bbb5-43e4-b768-88d4bdf1eb65-utilities\") pod \"redhat-marketplace-bb5sn\" (UID: \"8ec402bd-bbb5-43e4-b768-88d4bdf1eb65\") " pod="openshift-marketplace/redhat-marketplace-bb5sn" Feb 18 01:23:39 crc kubenswrapper[4791]: I0218 01:23:39.314245 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k9tc2\" (UniqueName: \"kubernetes.io/projected/8ec402bd-bbb5-43e4-b768-88d4bdf1eb65-kube-api-access-k9tc2\") pod \"redhat-marketplace-bb5sn\" (UID: \"8ec402bd-bbb5-43e4-b768-88d4bdf1eb65\") " pod="openshift-marketplace/redhat-marketplace-bb5sn" Feb 18 01:23:39 crc kubenswrapper[4791]: I0218 01:23:39.416907 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k9tc2\" (UniqueName: \"kubernetes.io/projected/8ec402bd-bbb5-43e4-b768-88d4bdf1eb65-kube-api-access-k9tc2\") pod \"redhat-marketplace-bb5sn\" (UID: \"8ec402bd-bbb5-43e4-b768-88d4bdf1eb65\") " pod="openshift-marketplace/redhat-marketplace-bb5sn" Feb 18 01:23:39 crc kubenswrapper[4791]: I0218 01:23:39.417496 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ec402bd-bbb5-43e4-b768-88d4bdf1eb65-catalog-content\") pod \"redhat-marketplace-bb5sn\" (UID: \"8ec402bd-bbb5-43e4-b768-88d4bdf1eb65\") " pod="openshift-marketplace/redhat-marketplace-bb5sn" Feb 18 01:23:39 crc kubenswrapper[4791]: I0218 01:23:39.417882 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ec402bd-bbb5-43e4-b768-88d4bdf1eb65-catalog-content\") pod \"redhat-marketplace-bb5sn\" (UID: \"8ec402bd-bbb5-43e4-b768-88d4bdf1eb65\") " pod="openshift-marketplace/redhat-marketplace-bb5sn" Feb 18 01:23:39 crc kubenswrapper[4791]: I0218 01:23:39.417935 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ec402bd-bbb5-43e4-b768-88d4bdf1eb65-utilities\") pod \"redhat-marketplace-bb5sn\" (UID: \"8ec402bd-bbb5-43e4-b768-88d4bdf1eb65\") " pod="openshift-marketplace/redhat-marketplace-bb5sn" Feb 18 01:23:39 crc kubenswrapper[4791]: I0218 01:23:39.418273 4791 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ec402bd-bbb5-43e4-b768-88d4bdf1eb65-utilities\") pod \"redhat-marketplace-bb5sn\" (UID: \"8ec402bd-bbb5-43e4-b768-88d4bdf1eb65\") " pod="openshift-marketplace/redhat-marketplace-bb5sn" Feb 18 01:23:39 crc kubenswrapper[4791]: I0218 01:23:39.446057 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k9tc2\" (UniqueName: \"kubernetes.io/projected/8ec402bd-bbb5-43e4-b768-88d4bdf1eb65-kube-api-access-k9tc2\") pod \"redhat-marketplace-bb5sn\" (UID: \"8ec402bd-bbb5-43e4-b768-88d4bdf1eb65\") " pod="openshift-marketplace/redhat-marketplace-bb5sn" Feb 18 01:23:39 crc kubenswrapper[4791]: I0218 01:23:39.503416 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bb5sn" Feb 18 01:23:39 crc kubenswrapper[4791]: I0218 01:23:39.973774 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bb5sn"] Feb 18 01:23:40 crc kubenswrapper[4791]: E0218 01:23:40.062907 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:23:40 crc kubenswrapper[4791]: I0218 01:23:40.443214 4791 generic.go:334] "Generic (PLEG): container finished" podID="8ec402bd-bbb5-43e4-b768-88d4bdf1eb65" containerID="30f89189d0dfa136126e6e4a570d08f7cdcd8b14abbba952b5c194e4a49521b8" exitCode=0 Feb 18 01:23:40 crc kubenswrapper[4791]: I0218 01:23:40.443309 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bb5sn" event={"ID":"8ec402bd-bbb5-43e4-b768-88d4bdf1eb65","Type":"ContainerDied","Data":"30f89189d0dfa136126e6e4a570d08f7cdcd8b14abbba952b5c194e4a49521b8"} Feb 18 01:23:40 crc kubenswrapper[4791]: I0218 01:23:40.443450 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bb5sn" event={"ID":"8ec402bd-bbb5-43e4-b768-88d4bdf1eb65","Type":"ContainerStarted","Data":"2b630a31296e842fffcbd8ab3aad5cacc8a2323627654894eea413b45c83ab12"} Feb 18 01:23:42 crc kubenswrapper[4791]: I0218 01:23:42.464540 4791 generic.go:334] "Generic (PLEG): container finished" podID="8ec402bd-bbb5-43e4-b768-88d4bdf1eb65" containerID="ffaf34c2391c4d77d041446122f84bf78c96316a5156432fb2199294b1af8764" exitCode=0 Feb 18 01:23:42 crc kubenswrapper[4791]: I0218 01:23:42.464598 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bb5sn" event={"ID":"8ec402bd-bbb5-43e4-b768-88d4bdf1eb65","Type":"ContainerDied","Data":"ffaf34c2391c4d77d041446122f84bf78c96316a5156432fb2199294b1af8764"} Feb 18 01:23:43 crc kubenswrapper[4791]: I0218 01:23:43.475710 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bb5sn" event={"ID":"8ec402bd-bbb5-43e4-b768-88d4bdf1eb65","Type":"ContainerStarted","Data":"4c4b42a3581cc40aacb5c1c3a263613710d97c395937821426734fdc1ff20355"} Feb 18 01:23:43 crc kubenswrapper[4791]: I0218 01:23:43.501858 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-bb5sn" podStartSLOduration=2.067150601 podStartE2EDuration="4.501834496s" podCreationTimestamp="2026-02-18 
01:23:39 +0000 UTC" firstStartedPulling="2026-02-18 01:23:40.445059631 +0000 UTC m=+2962.013072801" lastFinishedPulling="2026-02-18 01:23:42.879743526 +0000 UTC m=+2964.447756696" observedRunningTime="2026-02-18 01:23:43.497329158 +0000 UTC m=+2965.065342328" watchObservedRunningTime="2026-02-18 01:23:43.501834496 +0000 UTC m=+2965.069847686" Feb 18 01:23:49 crc kubenswrapper[4791]: I0218 01:23:49.503714 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-bb5sn" Feb 18 01:23:49 crc kubenswrapper[4791]: I0218 01:23:49.504347 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-bb5sn" Feb 18 01:23:49 crc kubenswrapper[4791]: I0218 01:23:49.560625 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-bb5sn" Feb 18 01:23:49 crc kubenswrapper[4791]: I0218 01:23:49.629980 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-bb5sn" Feb 18 01:23:49 crc kubenswrapper[4791]: I0218 01:23:49.807835 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bb5sn"] Feb 18 01:23:51 crc kubenswrapper[4791]: I0218 01:23:51.570167 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-bb5sn" podUID="8ec402bd-bbb5-43e4-b768-88d4bdf1eb65" containerName="registry-server" containerID="cri-o://4c4b42a3581cc40aacb5c1c3a263613710d97c395937821426734fdc1ff20355" gracePeriod=2 Feb 18 01:23:52 crc kubenswrapper[4791]: E0218 01:23:52.063638 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:23:52 crc kubenswrapper[4791]: E0218 01:23:52.063732 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:23:52 crc kubenswrapper[4791]: I0218 01:23:52.191255 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bb5sn" Feb 18 01:23:52 crc kubenswrapper[4791]: I0218 01:23:52.343110 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ec402bd-bbb5-43e4-b768-88d4bdf1eb65-utilities\") pod \"8ec402bd-bbb5-43e4-b768-88d4bdf1eb65\" (UID: \"8ec402bd-bbb5-43e4-b768-88d4bdf1eb65\") " Feb 18 01:23:52 crc kubenswrapper[4791]: I0218 01:23:52.343330 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ec402bd-bbb5-43e4-b768-88d4bdf1eb65-catalog-content\") pod \"8ec402bd-bbb5-43e4-b768-88d4bdf1eb65\" (UID: \"8ec402bd-bbb5-43e4-b768-88d4bdf1eb65\") " Feb 18 01:23:52 crc kubenswrapper[4791]: I0218 01:23:52.343409 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k9tc2\" (UniqueName: \"kubernetes.io/projected/8ec402bd-bbb5-43e4-b768-88d4bdf1eb65-kube-api-access-k9tc2\") pod \"8ec402bd-bbb5-43e4-b768-88d4bdf1eb65\" (UID: \"8ec402bd-bbb5-43e4-b768-88d4bdf1eb65\") " Feb 18 01:23:52 crc kubenswrapper[4791]: I0218 01:23:52.344220 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8ec402bd-bbb5-43e4-b768-88d4bdf1eb65-utilities" (OuterVolumeSpecName: "utilities") pod "8ec402bd-bbb5-43e4-b768-88d4bdf1eb65" (UID: "8ec402bd-bbb5-43e4-b768-88d4bdf1eb65"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:23:52 crc kubenswrapper[4791]: I0218 01:23:52.349214 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ec402bd-bbb5-43e4-b768-88d4bdf1eb65-kube-api-access-k9tc2" (OuterVolumeSpecName: "kube-api-access-k9tc2") pod "8ec402bd-bbb5-43e4-b768-88d4bdf1eb65" (UID: "8ec402bd-bbb5-43e4-b768-88d4bdf1eb65"). InnerVolumeSpecName "kube-api-access-k9tc2". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:23:52 crc kubenswrapper[4791]: I0218 01:23:52.365849 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8ec402bd-bbb5-43e4-b768-88d4bdf1eb65-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8ec402bd-bbb5-43e4-b768-88d4bdf1eb65" (UID: "8ec402bd-bbb5-43e4-b768-88d4bdf1eb65"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:23:52 crc kubenswrapper[4791]: I0218 01:23:52.446388 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k9tc2\" (UniqueName: \"kubernetes.io/projected/8ec402bd-bbb5-43e4-b768-88d4bdf1eb65-kube-api-access-k9tc2\") on node \"crc\" DevicePath \"\"" Feb 18 01:23:52 crc kubenswrapper[4791]: I0218 01:23:52.446423 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ec402bd-bbb5-43e4-b768-88d4bdf1eb65-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 01:23:52 crc kubenswrapper[4791]: I0218 01:23:52.446438 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ec402bd-bbb5-43e4-b768-88d4bdf1eb65-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 01:23:52 crc kubenswrapper[4791]: I0218 01:23:52.581514 4791 generic.go:334] "Generic (PLEG): container finished" podID="8ec402bd-bbb5-43e4-b768-88d4bdf1eb65" containerID="4c4b42a3581cc40aacb5c1c3a263613710d97c395937821426734fdc1ff20355" exitCode=0 Feb 18 01:23:52 crc kubenswrapper[4791]: I0218 01:23:52.581567 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bb5sn" event={"ID":"8ec402bd-bbb5-43e4-b768-88d4bdf1eb65","Type":"ContainerDied","Data":"4c4b42a3581cc40aacb5c1c3a263613710d97c395937821426734fdc1ff20355"} Feb 18 01:23:52 crc kubenswrapper[4791]: I0218 01:23:52.581577 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bb5sn" Feb 18 01:23:52 crc kubenswrapper[4791]: I0218 01:23:52.581606 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bb5sn" event={"ID":"8ec402bd-bbb5-43e4-b768-88d4bdf1eb65","Type":"ContainerDied","Data":"2b630a31296e842fffcbd8ab3aad5cacc8a2323627654894eea413b45c83ab12"} Feb 18 01:23:52 crc kubenswrapper[4791]: I0218 01:23:52.581627 4791 scope.go:117] "RemoveContainer" containerID="4c4b42a3581cc40aacb5c1c3a263613710d97c395937821426734fdc1ff20355" Feb 18 01:23:52 crc kubenswrapper[4791]: I0218 01:23:52.600711 4791 scope.go:117] "RemoveContainer" containerID="ffaf34c2391c4d77d041446122f84bf78c96316a5156432fb2199294b1af8764" Feb 18 01:23:52 crc kubenswrapper[4791]: I0218 01:23:52.624279 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bb5sn"] Feb 18 01:23:52 crc kubenswrapper[4791]: I0218 01:23:52.632996 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-bb5sn"] Feb 18 01:23:52 crc kubenswrapper[4791]: I0218 01:23:52.641309 4791 scope.go:117] "RemoveContainer" containerID="30f89189d0dfa136126e6e4a570d08f7cdcd8b14abbba952b5c194e4a49521b8" Feb 18 01:23:52 crc kubenswrapper[4791]: I0218 01:23:52.703068 4791 scope.go:117] "RemoveContainer" containerID="4c4b42a3581cc40aacb5c1c3a263613710d97c395937821426734fdc1ff20355" Feb 18 01:23:52 crc kubenswrapper[4791]: E0218 01:23:52.703524 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c4b42a3581cc40aacb5c1c3a263613710d97c395937821426734fdc1ff20355\": container with ID starting with 4c4b42a3581cc40aacb5c1c3a263613710d97c395937821426734fdc1ff20355 not found: ID does not exist" containerID="4c4b42a3581cc40aacb5c1c3a263613710d97c395937821426734fdc1ff20355" Feb 18 01:23:52 crc kubenswrapper[4791]: I0218 01:23:52.703551 4791 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c4b42a3581cc40aacb5c1c3a263613710d97c395937821426734fdc1ff20355"} err="failed to get container status \"4c4b42a3581cc40aacb5c1c3a263613710d97c395937821426734fdc1ff20355\": rpc error: code = NotFound desc = could not find container \"4c4b42a3581cc40aacb5c1c3a263613710d97c395937821426734fdc1ff20355\": container with ID starting with 4c4b42a3581cc40aacb5c1c3a263613710d97c395937821426734fdc1ff20355 not found: ID does not exist" Feb 18 01:23:52 crc kubenswrapper[4791]: I0218 01:23:52.703571 4791 scope.go:117] "RemoveContainer" containerID="ffaf34c2391c4d77d041446122f84bf78c96316a5156432fb2199294b1af8764" Feb 18 01:23:52 crc kubenswrapper[4791]: E0218 01:23:52.703833 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ffaf34c2391c4d77d041446122f84bf78c96316a5156432fb2199294b1af8764\": container with ID starting with ffaf34c2391c4d77d041446122f84bf78c96316a5156432fb2199294b1af8764 not found: ID does not exist" containerID="ffaf34c2391c4d77d041446122f84bf78c96316a5156432fb2199294b1af8764" Feb 18 01:23:52 crc kubenswrapper[4791]: I0218 01:23:52.703855 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ffaf34c2391c4d77d041446122f84bf78c96316a5156432fb2199294b1af8764"} err="failed to get container status \"ffaf34c2391c4d77d041446122f84bf78c96316a5156432fb2199294b1af8764\": rpc error: code = NotFound desc = could not find container \"ffaf34c2391c4d77d041446122f84bf78c96316a5156432fb2199294b1af8764\": container with ID starting with ffaf34c2391c4d77d041446122f84bf78c96316a5156432fb2199294b1af8764 not found: ID does not exist" Feb 18 01:23:52 crc kubenswrapper[4791]: I0218 01:23:52.703868 4791 scope.go:117] "RemoveContainer" containerID="30f89189d0dfa136126e6e4a570d08f7cdcd8b14abbba952b5c194e4a49521b8" Feb 18 01:23:52 crc kubenswrapper[4791]: E0218 01:23:52.704043 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"30f89189d0dfa136126e6e4a570d08f7cdcd8b14abbba952b5c194e4a49521b8\": container with ID starting with 30f89189d0dfa136126e6e4a570d08f7cdcd8b14abbba952b5c194e4a49521b8 not found: ID does not exist" containerID="30f89189d0dfa136126e6e4a570d08f7cdcd8b14abbba952b5c194e4a49521b8" Feb 18 01:23:52 crc kubenswrapper[4791]: I0218 01:23:52.704062 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"30f89189d0dfa136126e6e4a570d08f7cdcd8b14abbba952b5c194e4a49521b8"} err="failed to get container status \"30f89189d0dfa136126e6e4a570d08f7cdcd8b14abbba952b5c194e4a49521b8\": rpc error: code = NotFound desc = could not find container \"30f89189d0dfa136126e6e4a570d08f7cdcd8b14abbba952b5c194e4a49521b8\": container with ID starting with 30f89189d0dfa136126e6e4a570d08f7cdcd8b14abbba952b5c194e4a49521b8 not found: ID does not exist" Feb 18 01:23:53 crc kubenswrapper[4791]: I0218 01:23:53.076848 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ec402bd-bbb5-43e4-b768-88d4bdf1eb65" path="/var/lib/kubelet/pods/8ec402bd-bbb5-43e4-b768-88d4bdf1eb65/volumes" Feb 18 01:24:06 crc kubenswrapper[4791]: E0218 01:24:06.064463 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:24:07 crc kubenswrapper[4791]: E0218 01:24:07.063731 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:24:20 crc kubenswrapper[4791]: E0218 01:24:20.066392 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:24:21 crc kubenswrapper[4791]: E0218 01:24:21.078876 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:24:33 crc kubenswrapper[4791]: E0218 01:24:33.064265 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:24:34 crc kubenswrapper[4791]: E0218 01:24:34.064630 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:24:48 crc kubenswrapper[4791]: E0218 01:24:48.063764 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:24:48 crc kubenswrapper[4791]: E0218 01:24:48.064762 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:25:01 crc kubenswrapper[4791]: E0218 01:25:01.065619 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:25:03 crc kubenswrapper[4791]: E0218 01:25:03.064353 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:25:12 crc kubenswrapper[4791]: E0218 01:25:12.064507 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:25:14 crc kubenswrapper[4791]: I0218 01:25:14.839105 4791 generic.go:334] "Generic (PLEG): container finished" podID="43c4470a-baeb-43d6-bb3e-ff571be8c778" containerID="1d50abf02a3334e95e61efe4f2a8e783e84f97222a754d16382adb719f84a7a4" exitCode=2 Feb 18 01:25:14 crc kubenswrapper[4791]: I0218 01:25:14.839246 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gpsbw" event={"ID":"43c4470a-baeb-43d6-bb3e-ff571be8c778","Type":"ContainerDied","Data":"1d50abf02a3334e95e61efe4f2a8e783e84f97222a754d16382adb719f84a7a4"} Feb 18 01:25:15 crc kubenswrapper[4791]: E0218 01:25:15.064309 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:25:16 crc kubenswrapper[4791]: I0218 01:25:16.373842 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gpsbw" Feb 18 01:25:16 crc kubenswrapper[4791]: I0218 01:25:16.533891 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/43c4470a-baeb-43d6-bb3e-ff571be8c778-ssh-key-openstack-edpm-ipam\") pod \"43c4470a-baeb-43d6-bb3e-ff571be8c778\" (UID: \"43c4470a-baeb-43d6-bb3e-ff571be8c778\") " Feb 18 01:25:16 crc kubenswrapper[4791]: I0218 01:25:16.534078 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/43c4470a-baeb-43d6-bb3e-ff571be8c778-inventory\") pod \"43c4470a-baeb-43d6-bb3e-ff571be8c778\" (UID: \"43c4470a-baeb-43d6-bb3e-ff571be8c778\") " Feb 18 01:25:16 crc kubenswrapper[4791]: I0218 01:25:16.534225 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kvb8t\" (UniqueName: \"kubernetes.io/projected/43c4470a-baeb-43d6-bb3e-ff571be8c778-kube-api-access-kvb8t\") pod \"43c4470a-baeb-43d6-bb3e-ff571be8c778\" (UID: \"43c4470a-baeb-43d6-bb3e-ff571be8c778\") " Feb 18 01:25:16 crc kubenswrapper[4791]: I0218 01:25:16.546536 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43c4470a-baeb-43d6-bb3e-ff571be8c778-kube-api-access-kvb8t" (OuterVolumeSpecName: "kube-api-access-kvb8t") pod "43c4470a-baeb-43d6-bb3e-ff571be8c778" (UID: "43c4470a-baeb-43d6-bb3e-ff571be8c778"). InnerVolumeSpecName "kube-api-access-kvb8t". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:25:16 crc kubenswrapper[4791]: I0218 01:25:16.568455 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43c4470a-baeb-43d6-bb3e-ff571be8c778-inventory" (OuterVolumeSpecName: "inventory") pod "43c4470a-baeb-43d6-bb3e-ff571be8c778" (UID: "43c4470a-baeb-43d6-bb3e-ff571be8c778"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:25:16 crc kubenswrapper[4791]: I0218 01:25:16.570479 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43c4470a-baeb-43d6-bb3e-ff571be8c778-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "43c4470a-baeb-43d6-bb3e-ff571be8c778" (UID: "43c4470a-baeb-43d6-bb3e-ff571be8c778"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:25:16 crc kubenswrapper[4791]: I0218 01:25:16.637435 4791 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/43c4470a-baeb-43d6-bb3e-ff571be8c778-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Feb 18 01:25:16 crc kubenswrapper[4791]: I0218 01:25:16.637471 4791 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/43c4470a-baeb-43d6-bb3e-ff571be8c778-inventory\") on node \"crc\" DevicePath \"\"" Feb 18 01:25:16 crc kubenswrapper[4791]: I0218 01:25:16.637481 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kvb8t\" (UniqueName: \"kubernetes.io/projected/43c4470a-baeb-43d6-bb3e-ff571be8c778-kube-api-access-kvb8t\") on node \"crc\" DevicePath \"\"" Feb 18 01:25:16 crc kubenswrapper[4791]: I0218 01:25:16.862418 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gpsbw" event={"ID":"43c4470a-baeb-43d6-bb3e-ff571be8c778","Type":"ContainerDied","Data":"a663edba20fcc4594c04ce07acb076a9a4ad715ae0da159f0b353c6dee3300e3"} Feb 18 01:25:16 crc kubenswrapper[4791]: I0218 01:25:16.862459 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a663edba20fcc4594c04ce07acb076a9a4ad715ae0da159f0b353c6dee3300e3" Feb 18 01:25:16 crc kubenswrapper[4791]: I0218 01:25:16.862467 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gpsbw" Feb 18 01:25:26 crc kubenswrapper[4791]: E0218 01:25:26.065850 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:25:26 crc kubenswrapper[4791]: I0218 01:25:26.800525 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 01:25:26 crc kubenswrapper[4791]: I0218 01:25:26.800600 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 01:25:28 crc kubenswrapper[4791]: E0218 01:25:28.064728 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:25:39 crc kubenswrapper[4791]: E0218 01:25:39.072930 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:25:41 crc kubenswrapper[4791]: E0218 01:25:41.066180 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:25:50 crc kubenswrapper[4791]: E0218 01:25:50.063743 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:25:54 crc kubenswrapper[4791]: I0218 01:25:54.044221 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vs565"] Feb 18 01:25:54 crc kubenswrapper[4791]: E0218 01:25:54.045341 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43c4470a-baeb-43d6-bb3e-ff571be8c778" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Feb 18 01:25:54 crc kubenswrapper[4791]: I0218 01:25:54.045360 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="43c4470a-baeb-43d6-bb3e-ff571be8c778" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Feb 18 01:25:54 crc kubenswrapper[4791]: E0218 
01:25:54.045381 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ec402bd-bbb5-43e4-b768-88d4bdf1eb65" containerName="extract-content" Feb 18 01:25:54 crc kubenswrapper[4791]: I0218 01:25:54.045389 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ec402bd-bbb5-43e4-b768-88d4bdf1eb65" containerName="extract-content" Feb 18 01:25:54 crc kubenswrapper[4791]: E0218 01:25:54.045415 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ec402bd-bbb5-43e4-b768-88d4bdf1eb65" containerName="extract-utilities" Feb 18 01:25:54 crc kubenswrapper[4791]: I0218 01:25:54.045424 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ec402bd-bbb5-43e4-b768-88d4bdf1eb65" containerName="extract-utilities" Feb 18 01:25:54 crc kubenswrapper[4791]: E0218 01:25:54.045464 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ec402bd-bbb5-43e4-b768-88d4bdf1eb65" containerName="registry-server" Feb 18 01:25:54 crc kubenswrapper[4791]: I0218 01:25:54.045472 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ec402bd-bbb5-43e4-b768-88d4bdf1eb65" containerName="registry-server" Feb 18 01:25:54 crc kubenswrapper[4791]: I0218 01:25:54.045738 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="43c4470a-baeb-43d6-bb3e-ff571be8c778" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Feb 18 01:25:54 crc kubenswrapper[4791]: I0218 01:25:54.045766 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ec402bd-bbb5-43e4-b768-88d4bdf1eb65" containerName="registry-server" Feb 18 01:25:54 crc kubenswrapper[4791]: I0218 01:25:54.046723 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vs565" Feb 18 01:25:54 crc kubenswrapper[4791]: I0218 01:25:54.049339 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Feb 18 01:25:54 crc kubenswrapper[4791]: I0218 01:25:54.049646 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Feb 18 01:25:54 crc kubenswrapper[4791]: I0218 01:25:54.050095 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Feb 18 01:25:54 crc kubenswrapper[4791]: I0218 01:25:54.054666 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-qjdk4" Feb 18 01:25:54 crc kubenswrapper[4791]: I0218 01:25:54.081465 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vs565"] Feb 18 01:25:54 crc kubenswrapper[4791]: I0218 01:25:54.131115 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4znd5\" (UniqueName: \"kubernetes.io/projected/2bee1000-5b84-4271-9e51-adb4f12eaadb-kube-api-access-4znd5\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vs565\" (UID: \"2bee1000-5b84-4271-9e51-adb4f12eaadb\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vs565" Feb 18 01:25:54 crc kubenswrapper[4791]: I0218 01:25:54.131228 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2bee1000-5b84-4271-9e51-adb4f12eaadb-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vs565\" (UID: 
\"2bee1000-5b84-4271-9e51-adb4f12eaadb\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vs565" Feb 18 01:25:54 crc kubenswrapper[4791]: I0218 01:25:54.131278 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2bee1000-5b84-4271-9e51-adb4f12eaadb-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vs565\" (UID: \"2bee1000-5b84-4271-9e51-adb4f12eaadb\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vs565" Feb 18 01:25:54 crc kubenswrapper[4791]: I0218 01:25:54.233957 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2bee1000-5b84-4271-9e51-adb4f12eaadb-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vs565\" (UID: \"2bee1000-5b84-4271-9e51-adb4f12eaadb\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vs565" Feb 18 01:25:54 crc kubenswrapper[4791]: I0218 01:25:54.235026 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4znd5\" (UniqueName: \"kubernetes.io/projected/2bee1000-5b84-4271-9e51-adb4f12eaadb-kube-api-access-4znd5\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vs565\" (UID: \"2bee1000-5b84-4271-9e51-adb4f12eaadb\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vs565" Feb 18 01:25:54 crc kubenswrapper[4791]: I0218 01:25:54.235986 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2bee1000-5b84-4271-9e51-adb4f12eaadb-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vs565\" (UID: \"2bee1000-5b84-4271-9e51-adb4f12eaadb\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vs565" Feb 18 01:25:54 crc kubenswrapper[4791]: I0218 01:25:54.241662 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2bee1000-5b84-4271-9e51-adb4f12eaadb-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vs565\" (UID: \"2bee1000-5b84-4271-9e51-adb4f12eaadb\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vs565" Feb 18 01:25:54 crc kubenswrapper[4791]: I0218 01:25:54.248748 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2bee1000-5b84-4271-9e51-adb4f12eaadb-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vs565\" (UID: \"2bee1000-5b84-4271-9e51-adb4f12eaadb\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vs565" Feb 18 01:25:54 crc kubenswrapper[4791]: I0218 01:25:54.260110 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4znd5\" (UniqueName: \"kubernetes.io/projected/2bee1000-5b84-4271-9e51-adb4f12eaadb-kube-api-access-4znd5\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vs565\" (UID: \"2bee1000-5b84-4271-9e51-adb4f12eaadb\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vs565" Feb 18 01:25:54 crc kubenswrapper[4791]: I0218 01:25:54.376674 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vs565" Feb 18 01:25:54 crc kubenswrapper[4791]: I0218 01:25:54.928946 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vs565"] Feb 18 01:25:55 crc kubenswrapper[4791]: E0218 01:25:55.062550 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:25:55 crc kubenswrapper[4791]: I0218 01:25:55.354044 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vs565" event={"ID":"2bee1000-5b84-4271-9e51-adb4f12eaadb","Type":"ContainerStarted","Data":"061752326fa3937dd89777450dabdee334ae8cbc3f130632be5bfc5203f1cf50"} Feb 18 01:25:56 crc kubenswrapper[4791]: I0218 01:25:56.365108 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vs565" event={"ID":"2bee1000-5b84-4271-9e51-adb4f12eaadb","Type":"ContainerStarted","Data":"785350efd1039a12488b9f589c2141c62ac21852fdffa2b3dea38fc3cbc23a94"} Feb 18 01:25:56 crc kubenswrapper[4791]: I0218 01:25:56.378618 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vs565" podStartSLOduration=1.9527158789999999 podStartE2EDuration="2.378594152s" podCreationTimestamp="2026-02-18 01:25:54 +0000 UTC" firstStartedPulling="2026-02-18 01:25:54.929479674 +0000 UTC m=+3096.497492844" lastFinishedPulling="2026-02-18 01:25:55.355357947 +0000 UTC m=+3096.923371117" observedRunningTime="2026-02-18 01:25:56.376791866 +0000 UTC m=+3097.944805056" watchObservedRunningTime="2026-02-18 01:25:56.378594152 +0000 UTC m=+3097.946607352" Feb 18 01:25:56 crc kubenswrapper[4791]: I0218 01:25:56.800193 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 01:25:56 crc kubenswrapper[4791]: I0218 01:25:56.800282 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 01:26:01 crc kubenswrapper[4791]: E0218 01:26:01.063315 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:26:06 crc kubenswrapper[4791]: E0218 01:26:06.064793 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" 
podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:26:13 crc kubenswrapper[4791]: E0218 01:26:13.064361 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:26:17 crc kubenswrapper[4791]: E0218 01:26:17.065101 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:26:26 crc kubenswrapper[4791]: I0218 01:26:26.800141 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 01:26:26 crc kubenswrapper[4791]: I0218 01:26:26.800799 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 01:26:26 crc kubenswrapper[4791]: I0218 01:26:26.800881 4791 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" Feb 18 01:26:26 crc kubenswrapper[4791]: I0218 01:26:26.802128 4791 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3cd1bc8e681afde002094b7f387db47144e26b7fef30167b74d341dbe40fb0a5"} pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 18 01:26:26 crc kubenswrapper[4791]: I0218 01:26:26.802248 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" containerID="cri-o://3cd1bc8e681afde002094b7f387db47144e26b7fef30167b74d341dbe40fb0a5" gracePeriod=600 Feb 18 01:26:26 crc kubenswrapper[4791]: E0218 01:26:26.954792 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:26:27 crc kubenswrapper[4791]: E0218 01:26:27.062544 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:26:27 crc 
kubenswrapper[4791]: I0218 01:26:27.767677 4791 generic.go:334] "Generic (PLEG): container finished" podID="0b31a333-8f95-459c-8135-e91e557c4c85" containerID="3cd1bc8e681afde002094b7f387db47144e26b7fef30167b74d341dbe40fb0a5" exitCode=0 Feb 18 01:26:27 crc kubenswrapper[4791]: I0218 01:26:27.767718 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerDied","Data":"3cd1bc8e681afde002094b7f387db47144e26b7fef30167b74d341dbe40fb0a5"} Feb 18 01:26:27 crc kubenswrapper[4791]: I0218 01:26:27.767750 4791 scope.go:117] "RemoveContainer" containerID="96f6b6f2e77d5877ada88a2fb85237314890c1147c715c19089e59128b302914" Feb 18 01:26:27 crc kubenswrapper[4791]: I0218 01:26:27.768513 4791 scope.go:117] "RemoveContainer" containerID="3cd1bc8e681afde002094b7f387db47144e26b7fef30167b74d341dbe40fb0a5" Feb 18 01:26:27 crc kubenswrapper[4791]: E0218 01:26:27.768768 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:26:32 crc kubenswrapper[4791]: E0218 01:26:32.064353 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:26:38 crc kubenswrapper[4791]: E0218 01:26:38.065977 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:26:39 crc kubenswrapper[4791]: I0218 01:26:39.076232 4791 scope.go:117] "RemoveContainer" containerID="3cd1bc8e681afde002094b7f387db47144e26b7fef30167b74d341dbe40fb0a5" Feb 18 01:26:39 crc kubenswrapper[4791]: E0218 01:26:39.076841 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:26:47 crc kubenswrapper[4791]: E0218 01:26:47.066325 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:26:49 crc kubenswrapper[4791]: E0218 01:26:49.062934 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:26:51 crc kubenswrapper[4791]: I0218 01:26:51.062687 4791 scope.go:117] "RemoveContainer" containerID="3cd1bc8e681afde002094b7f387db47144e26b7fef30167b74d341dbe40fb0a5" Feb 18 01:26:51 crc kubenswrapper[4791]: E0218 01:26:51.063393 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:27:00 crc kubenswrapper[4791]: E0218 01:27:00.064339 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:27:03 crc kubenswrapper[4791]: I0218 01:27:03.063523 4791 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 18 01:27:03 crc kubenswrapper[4791]: E0218 01:27:03.201005 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 01:27:03 crc kubenswrapper[4791]: E0218 01:27:03.201069 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 01:27:03 crc kubenswrapper[4791]: E0218 01:27:03.201223 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lgn7d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-scphk_openstack(68e5e8d6-5771-4045-858a-4a39b2db99f9): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 18 01:27:03 crc kubenswrapper[4791]: E0218 01:27:03.202435 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:27:06 crc kubenswrapper[4791]: I0218 01:27:06.061558 4791 scope.go:117] "RemoveContainer" containerID="3cd1bc8e681afde002094b7f387db47144e26b7fef30167b74d341dbe40fb0a5" Feb 18 01:27:06 crc kubenswrapper[4791]: E0218 01:27:06.062562 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:27:13 crc kubenswrapper[4791]: E0218 01:27:13.065603 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:27:18 crc kubenswrapper[4791]: I0218 01:27:18.062784 4791 scope.go:117] "RemoveContainer" containerID="3cd1bc8e681afde002094b7f387db47144e26b7fef30167b74d341dbe40fb0a5" Feb 18 01:27:18 crc kubenswrapper[4791]: E0218 01:27:18.063600 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:27:18 crc kubenswrapper[4791]: E0218 01:27:18.063828 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:27:28 crc kubenswrapper[4791]: E0218 01:27:28.064045 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:27:30 crc kubenswrapper[4791]: E0218 01:27:30.064707 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:27:31 crc kubenswrapper[4791]: I0218 01:27:31.063273 4791 scope.go:117] "RemoveContainer" containerID="3cd1bc8e681afde002094b7f387db47144e26b7fef30167b74d341dbe40fb0a5" Feb 18 01:27:31 crc kubenswrapper[4791]: E0218 01:27:31.063504 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:27:39 crc kubenswrapper[4791]: E0218 01:27:39.072024 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:27:41 crc kubenswrapper[4791]: E0218 01:27:41.064901 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:27:45 crc kubenswrapper[4791]: I0218 01:27:45.061512 4791 scope.go:117] "RemoveContainer" containerID="3cd1bc8e681afde002094b7f387db47144e26b7fef30167b74d341dbe40fb0a5" Feb 18 01:27:45 crc kubenswrapper[4791]: E0218 01:27:45.062314 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:27:47 crc kubenswrapper[4791]: I0218 01:27:47.331241 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-99trp"] Feb 18 01:27:47 crc kubenswrapper[4791]: I0218 01:27:47.333835 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-99trp" Feb 18 01:27:47 crc kubenswrapper[4791]: I0218 01:27:47.347991 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-99trp"] Feb 18 01:27:47 crc kubenswrapper[4791]: I0218 01:27:47.452228 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e63725d4-df95-4ff6-a99a-ef1f5e7c2a01-utilities\") pod \"community-operators-99trp\" (UID: \"e63725d4-df95-4ff6-a99a-ef1f5e7c2a01\") " pod="openshift-marketplace/community-operators-99trp" Feb 18 01:27:47 crc kubenswrapper[4791]: I0218 01:27:47.452327 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e63725d4-df95-4ff6-a99a-ef1f5e7c2a01-catalog-content\") pod \"community-operators-99trp\" (UID: \"e63725d4-df95-4ff6-a99a-ef1f5e7c2a01\") " pod="openshift-marketplace/community-operators-99trp" Feb 18 01:27:47 crc kubenswrapper[4791]: I0218 01:27:47.452560 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vk6nv\" (UniqueName: \"kubernetes.io/projected/e63725d4-df95-4ff6-a99a-ef1f5e7c2a01-kube-api-access-vk6nv\") pod \"community-operators-99trp\" (UID: \"e63725d4-df95-4ff6-a99a-ef1f5e7c2a01\") " pod="openshift-marketplace/community-operators-99trp" Feb 18 01:27:47 crc kubenswrapper[4791]: I0218 01:27:47.555291 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e63725d4-df95-4ff6-a99a-ef1f5e7c2a01-catalog-content\") pod \"community-operators-99trp\" (UID: \"e63725d4-df95-4ff6-a99a-ef1f5e7c2a01\") " pod="openshift-marketplace/community-operators-99trp" Feb 18 01:27:47 crc kubenswrapper[4791]: I0218 01:27:47.555363 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vk6nv\" (UniqueName: \"kubernetes.io/projected/e63725d4-df95-4ff6-a99a-ef1f5e7c2a01-kube-api-access-vk6nv\") pod \"community-operators-99trp\" (UID: \"e63725d4-df95-4ff6-a99a-ef1f5e7c2a01\") " pod="openshift-marketplace/community-operators-99trp" Feb 18 01:27:47 crc kubenswrapper[4791]: I0218 01:27:47.555528 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e63725d4-df95-4ff6-a99a-ef1f5e7c2a01-utilities\") pod \"community-operators-99trp\" (UID: \"e63725d4-df95-4ff6-a99a-ef1f5e7c2a01\") " pod="openshift-marketplace/community-operators-99trp" Feb 18 01:27:47 crc kubenswrapper[4791]: I0218 01:27:47.555897 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e63725d4-df95-4ff6-a99a-ef1f5e7c2a01-catalog-content\") pod \"community-operators-99trp\" (UID: \"e63725d4-df95-4ff6-a99a-ef1f5e7c2a01\") " pod="openshift-marketplace/community-operators-99trp" Feb 18 01:27:47 crc kubenswrapper[4791]: I0218 01:27:47.556060 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e63725d4-df95-4ff6-a99a-ef1f5e7c2a01-utilities\") pod \"community-operators-99trp\" (UID: \"e63725d4-df95-4ff6-a99a-ef1f5e7c2a01\") " pod="openshift-marketplace/community-operators-99trp" Feb 18 01:27:47 crc kubenswrapper[4791]: I0218 01:27:47.577616 4791 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-vk6nv\" (UniqueName: \"kubernetes.io/projected/e63725d4-df95-4ff6-a99a-ef1f5e7c2a01-kube-api-access-vk6nv\") pod \"community-operators-99trp\" (UID: \"e63725d4-df95-4ff6-a99a-ef1f5e7c2a01\") " pod="openshift-marketplace/community-operators-99trp" Feb 18 01:27:47 crc kubenswrapper[4791]: I0218 01:27:47.694672 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-99trp" Feb 18 01:27:49 crc kubenswrapper[4791]: I0218 01:27:48.316572 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-99trp"] Feb 18 01:27:49 crc kubenswrapper[4791]: I0218 01:27:48.772329 4791 generic.go:334] "Generic (PLEG): container finished" podID="e63725d4-df95-4ff6-a99a-ef1f5e7c2a01" containerID="1051e83d94911c0f5bbdbbd191d33d8d9a98fc71289cc59e40e8f15c3f793250" exitCode=0 Feb 18 01:27:49 crc kubenswrapper[4791]: I0218 01:27:48.772383 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-99trp" event={"ID":"e63725d4-df95-4ff6-a99a-ef1f5e7c2a01","Type":"ContainerDied","Data":"1051e83d94911c0f5bbdbbd191d33d8d9a98fc71289cc59e40e8f15c3f793250"} Feb 18 01:27:49 crc kubenswrapper[4791]: I0218 01:27:48.772751 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-99trp" event={"ID":"e63725d4-df95-4ff6-a99a-ef1f5e7c2a01","Type":"ContainerStarted","Data":"79e13dacaf801b36dd0657cada0c5bb6d9e99ac6d70770cac73328b8ff4aeb6c"} Feb 18 01:27:50 crc kubenswrapper[4791]: I0218 01:27:50.793298 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-99trp" event={"ID":"e63725d4-df95-4ff6-a99a-ef1f5e7c2a01","Type":"ContainerStarted","Data":"2cbcfeea0a1626afb7289e167fefb0a2f59fa6e4ccc2b0c02df1075e4c50ac09"} Feb 18 01:27:52 crc kubenswrapper[4791]: E0218 01:27:52.190999 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 01:27:52 crc kubenswrapper[4791]: E0218 01:27:52.191254 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 01:27:52 crc kubenswrapper[4791]: E0218 01:27:52.191419 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndh699h566hcch79h59h595h58bhb6h7fh594h67fh546h668h56hc8h6chd5h94h599h587hddh64bh57bhc9h68fh7dh57bh65dh64fh68dh59dq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-np7gr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(4b9cec47-aeda-40f0-b83e-46f09ce65e95): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 18 01:27:52 crc kubenswrapper[4791]: E0218 01:27:52.193212 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:27:52 crc kubenswrapper[4791]: I0218 01:27:52.820572 4791 generic.go:334] "Generic (PLEG): container finished" podID="e63725d4-df95-4ff6-a99a-ef1f5e7c2a01" containerID="2cbcfeea0a1626afb7289e167fefb0a2f59fa6e4ccc2b0c02df1075e4c50ac09" exitCode=0 Feb 18 01:27:52 crc kubenswrapper[4791]: I0218 01:27:52.820644 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-99trp" event={"ID":"e63725d4-df95-4ff6-a99a-ef1f5e7c2a01","Type":"ContainerDied","Data":"2cbcfeea0a1626afb7289e167fefb0a2f59fa6e4ccc2b0c02df1075e4c50ac09"} Feb 18 01:27:53 crc kubenswrapper[4791]: I0218 01:27:53.832895 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-99trp" event={"ID":"e63725d4-df95-4ff6-a99a-ef1f5e7c2a01","Type":"ContainerStarted","Data":"fb86d6c8817549f1400eb4790108f2864803381544d9779c170f1c818070a5e4"} Feb 18 01:27:53 crc kubenswrapper[4791]: I0218 01:27:53.851590 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-99trp" podStartSLOduration=2.392248634 podStartE2EDuration="6.85157047s" podCreationTimestamp="2026-02-18 01:27:47 +0000 UTC" firstStartedPulling="2026-02-18 01:27:48.775417362 +0000 UTC m=+3210.343430532" lastFinishedPulling="2026-02-18 01:27:53.234739198 +0000 UTC m=+3214.802752368" observedRunningTime="2026-02-18 01:27:53.847003218 +0000 UTC m=+3215.415016398" watchObservedRunningTime="2026-02-18 01:27:53.85157047 +0000 UTC m=+3215.419583660" Feb 18 01:27:56 crc kubenswrapper[4791]: E0218 01:27:56.063190 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:27:57 crc kubenswrapper[4791]: I0218 01:27:57.695480 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-99trp" Feb 18 01:27:57 crc kubenswrapper[4791]: I0218 01:27:57.696764 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-99trp" Feb 18 01:27:57 crc kubenswrapper[4791]: I0218 01:27:57.767912 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-99trp" Feb 18 01:27:58 crc kubenswrapper[4791]: I0218 01:27:58.944042 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-99trp" Feb 18 01:27:58 crc kubenswrapper[4791]: I0218 01:27:58.992557 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-99trp"] Feb 18 01:28:00 crc kubenswrapper[4791]: I0218 01:28:00.061779 4791 scope.go:117] "RemoveContainer" containerID="3cd1bc8e681afde002094b7f387db47144e26b7fef30167b74d341dbe40fb0a5" Feb 18 01:28:00 crc kubenswrapper[4791]: E0218 01:28:00.062274 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:28:00 crc kubenswrapper[4791]: I0218 01:28:00.905004 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-99trp" podUID="e63725d4-df95-4ff6-a99a-ef1f5e7c2a01" containerName="registry-server" containerID="cri-o://fb86d6c8817549f1400eb4790108f2864803381544d9779c170f1c818070a5e4" gracePeriod=2 Feb 18 01:28:01 crc kubenswrapper[4791]: I0218 01:28:01.477232 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-99trp" Feb 18 01:28:01 crc kubenswrapper[4791]: I0218 01:28:01.625842 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vk6nv\" (UniqueName: \"kubernetes.io/projected/e63725d4-df95-4ff6-a99a-ef1f5e7c2a01-kube-api-access-vk6nv\") pod \"e63725d4-df95-4ff6-a99a-ef1f5e7c2a01\" (UID: \"e63725d4-df95-4ff6-a99a-ef1f5e7c2a01\") " Feb 18 01:28:01 crc kubenswrapper[4791]: I0218 01:28:01.626064 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e63725d4-df95-4ff6-a99a-ef1f5e7c2a01-catalog-content\") pod \"e63725d4-df95-4ff6-a99a-ef1f5e7c2a01\" (UID: \"e63725d4-df95-4ff6-a99a-ef1f5e7c2a01\") " Feb 18 01:28:01 crc kubenswrapper[4791]: I0218 01:28:01.627518 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e63725d4-df95-4ff6-a99a-ef1f5e7c2a01-utilities\") pod \"e63725d4-df95-4ff6-a99a-ef1f5e7c2a01\" (UID: \"e63725d4-df95-4ff6-a99a-ef1f5e7c2a01\") " Feb 18 01:28:01 crc kubenswrapper[4791]: I0218 01:28:01.628772 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e63725d4-df95-4ff6-a99a-ef1f5e7c2a01-utilities" (OuterVolumeSpecName: "utilities") pod "e63725d4-df95-4ff6-a99a-ef1f5e7c2a01" (UID: "e63725d4-df95-4ff6-a99a-ef1f5e7c2a01"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:28:01 crc kubenswrapper[4791]: I0218 01:28:01.630057 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e63725d4-df95-4ff6-a99a-ef1f5e7c2a01-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 01:28:01 crc kubenswrapper[4791]: I0218 01:28:01.635400 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e63725d4-df95-4ff6-a99a-ef1f5e7c2a01-kube-api-access-vk6nv" (OuterVolumeSpecName: "kube-api-access-vk6nv") pod "e63725d4-df95-4ff6-a99a-ef1f5e7c2a01" (UID: "e63725d4-df95-4ff6-a99a-ef1f5e7c2a01"). InnerVolumeSpecName "kube-api-access-vk6nv". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:28:01 crc kubenswrapper[4791]: I0218 01:28:01.683328 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e63725d4-df95-4ff6-a99a-ef1f5e7c2a01-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e63725d4-df95-4ff6-a99a-ef1f5e7c2a01" (UID: "e63725d4-df95-4ff6-a99a-ef1f5e7c2a01"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:28:01 crc kubenswrapper[4791]: I0218 01:28:01.732990 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vk6nv\" (UniqueName: \"kubernetes.io/projected/e63725d4-df95-4ff6-a99a-ef1f5e7c2a01-kube-api-access-vk6nv\") on node \"crc\" DevicePath \"\"" Feb 18 01:28:01 crc kubenswrapper[4791]: I0218 01:28:01.733032 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e63725d4-df95-4ff6-a99a-ef1f5e7c2a01-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 01:28:01 crc kubenswrapper[4791]: I0218 01:28:01.915850 4791 generic.go:334] "Generic (PLEG): container finished" podID="e63725d4-df95-4ff6-a99a-ef1f5e7c2a01" containerID="fb86d6c8817549f1400eb4790108f2864803381544d9779c170f1c818070a5e4" exitCode=0 Feb 18 01:28:01 crc kubenswrapper[4791]: I0218 01:28:01.915895 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-99trp" event={"ID":"e63725d4-df95-4ff6-a99a-ef1f5e7c2a01","Type":"ContainerDied","Data":"fb86d6c8817549f1400eb4790108f2864803381544d9779c170f1c818070a5e4"} Feb 18 01:28:01 crc kubenswrapper[4791]: I0218 01:28:01.915921 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-99trp" event={"ID":"e63725d4-df95-4ff6-a99a-ef1f5e7c2a01","Type":"ContainerDied","Data":"79e13dacaf801b36dd0657cada0c5bb6d9e99ac6d70770cac73328b8ff4aeb6c"} Feb 18 01:28:01 crc kubenswrapper[4791]: I0218 01:28:01.915938 4791 scope.go:117] "RemoveContainer" containerID="fb86d6c8817549f1400eb4790108f2864803381544d9779c170f1c818070a5e4" Feb 18 01:28:01 crc kubenswrapper[4791]: I0218 01:28:01.915936 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-99trp" Feb 18 01:28:01 crc kubenswrapper[4791]: I0218 01:28:01.943407 4791 scope.go:117] "RemoveContainer" containerID="2cbcfeea0a1626afb7289e167fefb0a2f59fa6e4ccc2b0c02df1075e4c50ac09" Feb 18 01:28:01 crc kubenswrapper[4791]: I0218 01:28:01.953791 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-99trp"] Feb 18 01:28:01 crc kubenswrapper[4791]: I0218 01:28:01.966824 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-99trp"] Feb 18 01:28:01 crc kubenswrapper[4791]: I0218 01:28:01.988027 4791 scope.go:117] "RemoveContainer" containerID="1051e83d94911c0f5bbdbbd191d33d8d9a98fc71289cc59e40e8f15c3f793250" Feb 18 01:28:02 crc kubenswrapper[4791]: I0218 01:28:02.046136 4791 scope.go:117] "RemoveContainer" containerID="fb86d6c8817549f1400eb4790108f2864803381544d9779c170f1c818070a5e4" Feb 18 01:28:02 crc kubenswrapper[4791]: E0218 01:28:02.046586 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fb86d6c8817549f1400eb4790108f2864803381544d9779c170f1c818070a5e4\": container with ID starting with fb86d6c8817549f1400eb4790108f2864803381544d9779c170f1c818070a5e4 not found: ID does not exist" containerID="fb86d6c8817549f1400eb4790108f2864803381544d9779c170f1c818070a5e4" Feb 18 01:28:02 crc kubenswrapper[4791]: I0218 01:28:02.046641 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fb86d6c8817549f1400eb4790108f2864803381544d9779c170f1c818070a5e4"} err="failed to get container status \"fb86d6c8817549f1400eb4790108f2864803381544d9779c170f1c818070a5e4\": rpc error: code = NotFound desc = could not find container \"fb86d6c8817549f1400eb4790108f2864803381544d9779c170f1c818070a5e4\": container with ID starting with fb86d6c8817549f1400eb4790108f2864803381544d9779c170f1c818070a5e4 not found: ID does not exist" Feb 18 01:28:02 crc kubenswrapper[4791]: I0218 01:28:02.046676 4791 scope.go:117] "RemoveContainer" containerID="2cbcfeea0a1626afb7289e167fefb0a2f59fa6e4ccc2b0c02df1075e4c50ac09" Feb 18 01:28:02 crc kubenswrapper[4791]: E0218 01:28:02.047169 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2cbcfeea0a1626afb7289e167fefb0a2f59fa6e4ccc2b0c02df1075e4c50ac09\": container with ID starting with 2cbcfeea0a1626afb7289e167fefb0a2f59fa6e4ccc2b0c02df1075e4c50ac09 not found: ID does not exist" containerID="2cbcfeea0a1626afb7289e167fefb0a2f59fa6e4ccc2b0c02df1075e4c50ac09" Feb 18 01:28:02 crc kubenswrapper[4791]: I0218 01:28:02.047196 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2cbcfeea0a1626afb7289e167fefb0a2f59fa6e4ccc2b0c02df1075e4c50ac09"} err="failed to get container status \"2cbcfeea0a1626afb7289e167fefb0a2f59fa6e4ccc2b0c02df1075e4c50ac09\": rpc error: code = NotFound desc = could not find container \"2cbcfeea0a1626afb7289e167fefb0a2f59fa6e4ccc2b0c02df1075e4c50ac09\": container with ID starting with 2cbcfeea0a1626afb7289e167fefb0a2f59fa6e4ccc2b0c02df1075e4c50ac09 not found: ID does not exist" Feb 18 01:28:02 crc kubenswrapper[4791]: I0218 01:28:02.047213 4791 scope.go:117] "RemoveContainer" containerID="1051e83d94911c0f5bbdbbd191d33d8d9a98fc71289cc59e40e8f15c3f793250" Feb 18 01:28:02 crc kubenswrapper[4791]: E0218 01:28:02.047689 4791 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"1051e83d94911c0f5bbdbbd191d33d8d9a98fc71289cc59e40e8f15c3f793250\": container with ID starting with 1051e83d94911c0f5bbdbbd191d33d8d9a98fc71289cc59e40e8f15c3f793250 not found: ID does not exist" containerID="1051e83d94911c0f5bbdbbd191d33d8d9a98fc71289cc59e40e8f15c3f793250" Feb 18 01:28:02 crc kubenswrapper[4791]: I0218 01:28:02.047724 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1051e83d94911c0f5bbdbbd191d33d8d9a98fc71289cc59e40e8f15c3f793250"} err="failed to get container status \"1051e83d94911c0f5bbdbbd191d33d8d9a98fc71289cc59e40e8f15c3f793250\": rpc error: code = NotFound desc = could not find container \"1051e83d94911c0f5bbdbbd191d33d8d9a98fc71289cc59e40e8f15c3f793250\": container with ID starting with 1051e83d94911c0f5bbdbbd191d33d8d9a98fc71289cc59e40e8f15c3f793250 not found: ID does not exist" Feb 18 01:28:03 crc kubenswrapper[4791]: I0218 01:28:03.074324 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e63725d4-df95-4ff6-a99a-ef1f5e7c2a01" path="/var/lib/kubelet/pods/e63725d4-df95-4ff6-a99a-ef1f5e7c2a01/volumes" Feb 18 01:28:07 crc kubenswrapper[4791]: E0218 01:28:07.068886 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:28:11 crc kubenswrapper[4791]: E0218 01:28:11.066037 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:28:15 crc kubenswrapper[4791]: I0218 01:28:15.061423 4791 scope.go:117] "RemoveContainer" containerID="3cd1bc8e681afde002094b7f387db47144e26b7fef30167b74d341dbe40fb0a5" Feb 18 01:28:15 crc kubenswrapper[4791]: E0218 01:28:15.062206 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:28:22 crc kubenswrapper[4791]: E0218 01:28:22.063755 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:28:22 crc kubenswrapper[4791]: E0218 01:28:22.063812 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:28:26 crc kubenswrapper[4791]: I0218 01:28:26.062248 4791 scope.go:117] 
"RemoveContainer" containerID="3cd1bc8e681afde002094b7f387db47144e26b7fef30167b74d341dbe40fb0a5" Feb 18 01:28:26 crc kubenswrapper[4791]: E0218 01:28:26.063244 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:28:33 crc kubenswrapper[4791]: E0218 01:28:33.064027 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:28:36 crc kubenswrapper[4791]: E0218 01:28:36.063784 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:28:40 crc kubenswrapper[4791]: I0218 01:28:40.061910 4791 scope.go:117] "RemoveContainer" containerID="3cd1bc8e681afde002094b7f387db47144e26b7fef30167b74d341dbe40fb0a5" Feb 18 01:28:40 crc kubenswrapper[4791]: E0218 01:28:40.062782 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:28:47 crc kubenswrapper[4791]: E0218 01:28:47.065388 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:28:48 crc kubenswrapper[4791]: E0218 01:28:48.062656 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:28:53 crc kubenswrapper[4791]: I0218 01:28:53.061966 4791 scope.go:117] "RemoveContainer" containerID="3cd1bc8e681afde002094b7f387db47144e26b7fef30167b74d341dbe40fb0a5" Feb 18 01:28:53 crc kubenswrapper[4791]: E0218 01:28:53.062707 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 
01:28:58 crc kubenswrapper[4791]: E0218 01:28:58.067060 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:28:58 crc kubenswrapper[4791]: I0218 01:28:58.405743 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-wkvq7"] Feb 18 01:28:58 crc kubenswrapper[4791]: E0218 01:28:58.406292 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e63725d4-df95-4ff6-a99a-ef1f5e7c2a01" containerName="extract-content" Feb 18 01:28:58 crc kubenswrapper[4791]: I0218 01:28:58.406312 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="e63725d4-df95-4ff6-a99a-ef1f5e7c2a01" containerName="extract-content" Feb 18 01:28:58 crc kubenswrapper[4791]: E0218 01:28:58.406332 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e63725d4-df95-4ff6-a99a-ef1f5e7c2a01" containerName="extract-utilities" Feb 18 01:28:58 crc kubenswrapper[4791]: I0218 01:28:58.406341 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="e63725d4-df95-4ff6-a99a-ef1f5e7c2a01" containerName="extract-utilities" Feb 18 01:28:58 crc kubenswrapper[4791]: E0218 01:28:58.406399 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e63725d4-df95-4ff6-a99a-ef1f5e7c2a01" containerName="registry-server" Feb 18 01:28:58 crc kubenswrapper[4791]: I0218 01:28:58.406408 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="e63725d4-df95-4ff6-a99a-ef1f5e7c2a01" containerName="registry-server" Feb 18 01:28:58 crc kubenswrapper[4791]: I0218 01:28:58.406717 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="e63725d4-df95-4ff6-a99a-ef1f5e7c2a01" containerName="registry-server" Feb 18 01:28:58 crc kubenswrapper[4791]: I0218 01:28:58.408977 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wkvq7" Feb 18 01:28:58 crc kubenswrapper[4791]: I0218 01:28:58.428265 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wkvq7"] Feb 18 01:28:58 crc kubenswrapper[4791]: I0218 01:28:58.517924 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/18cb0170-440e-4fee-98c5-21ccabd2e958-utilities\") pod \"certified-operators-wkvq7\" (UID: \"18cb0170-440e-4fee-98c5-21ccabd2e958\") " pod="openshift-marketplace/certified-operators-wkvq7" Feb 18 01:28:58 crc kubenswrapper[4791]: I0218 01:28:58.518048 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/18cb0170-440e-4fee-98c5-21ccabd2e958-catalog-content\") pod \"certified-operators-wkvq7\" (UID: \"18cb0170-440e-4fee-98c5-21ccabd2e958\") " pod="openshift-marketplace/certified-operators-wkvq7" Feb 18 01:28:58 crc kubenswrapper[4791]: I0218 01:28:58.519559 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7d5hp\" (UniqueName: \"kubernetes.io/projected/18cb0170-440e-4fee-98c5-21ccabd2e958-kube-api-access-7d5hp\") pod \"certified-operators-wkvq7\" (UID: \"18cb0170-440e-4fee-98c5-21ccabd2e958\") " pod="openshift-marketplace/certified-operators-wkvq7" Feb 18 01:28:58 crc kubenswrapper[4791]: I0218 01:28:58.621615 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/18cb0170-440e-4fee-98c5-21ccabd2e958-catalog-content\") pod \"certified-operators-wkvq7\" (UID: \"18cb0170-440e-4fee-98c5-21ccabd2e958\") " pod="openshift-marketplace/certified-operators-wkvq7" Feb 18 01:28:58 crc kubenswrapper[4791]: I0218 01:28:58.621986 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7d5hp\" (UniqueName: \"kubernetes.io/projected/18cb0170-440e-4fee-98c5-21ccabd2e958-kube-api-access-7d5hp\") pod \"certified-operators-wkvq7\" (UID: \"18cb0170-440e-4fee-98c5-21ccabd2e958\") " pod="openshift-marketplace/certified-operators-wkvq7" Feb 18 01:28:58 crc kubenswrapper[4791]: I0218 01:28:58.622097 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/18cb0170-440e-4fee-98c5-21ccabd2e958-utilities\") pod \"certified-operators-wkvq7\" (UID: \"18cb0170-440e-4fee-98c5-21ccabd2e958\") " pod="openshift-marketplace/certified-operators-wkvq7" Feb 18 01:28:58 crc kubenswrapper[4791]: I0218 01:28:58.622291 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/18cb0170-440e-4fee-98c5-21ccabd2e958-catalog-content\") pod \"certified-operators-wkvq7\" (UID: \"18cb0170-440e-4fee-98c5-21ccabd2e958\") " pod="openshift-marketplace/certified-operators-wkvq7" Feb 18 01:28:58 crc kubenswrapper[4791]: I0218 01:28:58.622512 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/18cb0170-440e-4fee-98c5-21ccabd2e958-utilities\") pod \"certified-operators-wkvq7\" (UID: \"18cb0170-440e-4fee-98c5-21ccabd2e958\") " pod="openshift-marketplace/certified-operators-wkvq7" Feb 18 01:28:58 crc kubenswrapper[4791]: I0218 01:28:58.642868 4791 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-7d5hp\" (UniqueName: \"kubernetes.io/projected/18cb0170-440e-4fee-98c5-21ccabd2e958-kube-api-access-7d5hp\") pod \"certified-operators-wkvq7\" (UID: \"18cb0170-440e-4fee-98c5-21ccabd2e958\") " pod="openshift-marketplace/certified-operators-wkvq7" Feb 18 01:28:58 crc kubenswrapper[4791]: I0218 01:28:58.732519 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wkvq7" Feb 18 01:28:59 crc kubenswrapper[4791]: I0218 01:28:59.288438 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wkvq7"] Feb 18 01:28:59 crc kubenswrapper[4791]: I0218 01:28:59.539766 4791 generic.go:334] "Generic (PLEG): container finished" podID="18cb0170-440e-4fee-98c5-21ccabd2e958" containerID="06b28cd5f3215d7407188b90af7bbeaf3c179f91c674eb0880f3b5561c3802ef" exitCode=0 Feb 18 01:28:59 crc kubenswrapper[4791]: I0218 01:28:59.539900 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wkvq7" event={"ID":"18cb0170-440e-4fee-98c5-21ccabd2e958","Type":"ContainerDied","Data":"06b28cd5f3215d7407188b90af7bbeaf3c179f91c674eb0880f3b5561c3802ef"} Feb 18 01:28:59 crc kubenswrapper[4791]: I0218 01:28:59.540262 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wkvq7" event={"ID":"18cb0170-440e-4fee-98c5-21ccabd2e958","Type":"ContainerStarted","Data":"9a41de4fa94680e2d2abe630cdd677cd541daa0a3281c414c058ae21ef4ec2f2"} Feb 18 01:29:00 crc kubenswrapper[4791]: I0218 01:29:00.551714 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wkvq7" event={"ID":"18cb0170-440e-4fee-98c5-21ccabd2e958","Type":"ContainerStarted","Data":"81217a5307a20c673f1bceb323423610aa2ca61b54f5784f09a8e36f2ec05dd4"} Feb 18 01:29:01 crc kubenswrapper[4791]: E0218 01:29:01.065080 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:29:01 crc kubenswrapper[4791]: I0218 01:29:01.564724 4791 generic.go:334] "Generic (PLEG): container finished" podID="18cb0170-440e-4fee-98c5-21ccabd2e958" containerID="81217a5307a20c673f1bceb323423610aa2ca61b54f5784f09a8e36f2ec05dd4" exitCode=0 Feb 18 01:29:01 crc kubenswrapper[4791]: I0218 01:29:01.565959 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wkvq7" event={"ID":"18cb0170-440e-4fee-98c5-21ccabd2e958","Type":"ContainerDied","Data":"81217a5307a20c673f1bceb323423610aa2ca61b54f5784f09a8e36f2ec05dd4"} Feb 18 01:29:02 crc kubenswrapper[4791]: I0218 01:29:02.578387 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wkvq7" event={"ID":"18cb0170-440e-4fee-98c5-21ccabd2e958","Type":"ContainerStarted","Data":"aa930264f8b5a49f19322b7d7a36322ba23e9cf1a37a86bb7aa35f00a638edff"} Feb 18 01:29:02 crc kubenswrapper[4791]: I0218 01:29:02.608832 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-wkvq7" podStartSLOduration=2.158562152 podStartE2EDuration="4.608814652s" podCreationTimestamp="2026-02-18 01:28:58 +0000 UTC" firstStartedPulling="2026-02-18 01:28:59.542298247 
+0000 UTC m=+3281.110311417" lastFinishedPulling="2026-02-18 01:29:01.992550747 +0000 UTC m=+3283.560563917" observedRunningTime="2026-02-18 01:29:02.606074797 +0000 UTC m=+3284.174087977" watchObservedRunningTime="2026-02-18 01:29:02.608814652 +0000 UTC m=+3284.176827842" Feb 18 01:29:06 crc kubenswrapper[4791]: I0218 01:29:06.062529 4791 scope.go:117] "RemoveContainer" containerID="3cd1bc8e681afde002094b7f387db47144e26b7fef30167b74d341dbe40fb0a5" Feb 18 01:29:06 crc kubenswrapper[4791]: E0218 01:29:06.064458 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:29:08 crc kubenswrapper[4791]: I0218 01:29:08.734122 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-wkvq7" Feb 18 01:29:08 crc kubenswrapper[4791]: I0218 01:29:08.734534 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-wkvq7" Feb 18 01:29:08 crc kubenswrapper[4791]: I0218 01:29:08.789574 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-wkvq7" Feb 18 01:29:09 crc kubenswrapper[4791]: E0218 01:29:09.077750 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:29:09 crc kubenswrapper[4791]: I0218 01:29:09.740201 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-wkvq7" Feb 18 01:29:09 crc kubenswrapper[4791]: I0218 01:29:09.807478 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wkvq7"] Feb 18 01:29:11 crc kubenswrapper[4791]: I0218 01:29:11.694642 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-wkvq7" podUID="18cb0170-440e-4fee-98c5-21ccabd2e958" containerName="registry-server" containerID="cri-o://aa930264f8b5a49f19322b7d7a36322ba23e9cf1a37a86bb7aa35f00a638edff" gracePeriod=2 Feb 18 01:29:12 crc kubenswrapper[4791]: I0218 01:29:12.223046 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wkvq7" Feb 18 01:29:12 crc kubenswrapper[4791]: I0218 01:29:12.299343 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/18cb0170-440e-4fee-98c5-21ccabd2e958-catalog-content\") pod \"18cb0170-440e-4fee-98c5-21ccabd2e958\" (UID: \"18cb0170-440e-4fee-98c5-21ccabd2e958\") " Feb 18 01:29:12 crc kubenswrapper[4791]: I0218 01:29:12.299669 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7d5hp\" (UniqueName: \"kubernetes.io/projected/18cb0170-440e-4fee-98c5-21ccabd2e958-kube-api-access-7d5hp\") pod \"18cb0170-440e-4fee-98c5-21ccabd2e958\" (UID: \"18cb0170-440e-4fee-98c5-21ccabd2e958\") " Feb 18 01:29:12 crc kubenswrapper[4791]: I0218 01:29:12.299766 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/18cb0170-440e-4fee-98c5-21ccabd2e958-utilities\") pod \"18cb0170-440e-4fee-98c5-21ccabd2e958\" (UID: \"18cb0170-440e-4fee-98c5-21ccabd2e958\") " Feb 18 01:29:12 crc kubenswrapper[4791]: I0218 01:29:12.300675 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/18cb0170-440e-4fee-98c5-21ccabd2e958-utilities" (OuterVolumeSpecName: "utilities") pod "18cb0170-440e-4fee-98c5-21ccabd2e958" (UID: "18cb0170-440e-4fee-98c5-21ccabd2e958"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:29:12 crc kubenswrapper[4791]: I0218 01:29:12.307091 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/18cb0170-440e-4fee-98c5-21ccabd2e958-kube-api-access-7d5hp" (OuterVolumeSpecName: "kube-api-access-7d5hp") pod "18cb0170-440e-4fee-98c5-21ccabd2e958" (UID: "18cb0170-440e-4fee-98c5-21ccabd2e958"). InnerVolumeSpecName "kube-api-access-7d5hp". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:29:12 crc kubenswrapper[4791]: I0218 01:29:12.349539 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/18cb0170-440e-4fee-98c5-21ccabd2e958-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "18cb0170-440e-4fee-98c5-21ccabd2e958" (UID: "18cb0170-440e-4fee-98c5-21ccabd2e958"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:29:12 crc kubenswrapper[4791]: I0218 01:29:12.403084 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/18cb0170-440e-4fee-98c5-21ccabd2e958-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 01:29:12 crc kubenswrapper[4791]: I0218 01:29:12.403128 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7d5hp\" (UniqueName: \"kubernetes.io/projected/18cb0170-440e-4fee-98c5-21ccabd2e958-kube-api-access-7d5hp\") on node \"crc\" DevicePath \"\"" Feb 18 01:29:12 crc kubenswrapper[4791]: I0218 01:29:12.403139 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/18cb0170-440e-4fee-98c5-21ccabd2e958-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 01:29:12 crc kubenswrapper[4791]: I0218 01:29:12.709019 4791 generic.go:334] "Generic (PLEG): container finished" podID="18cb0170-440e-4fee-98c5-21ccabd2e958" containerID="aa930264f8b5a49f19322b7d7a36322ba23e9cf1a37a86bb7aa35f00a638edff" exitCode=0 Feb 18 01:29:12 crc kubenswrapper[4791]: I0218 01:29:12.709081 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wkvq7" event={"ID":"18cb0170-440e-4fee-98c5-21ccabd2e958","Type":"ContainerDied","Data":"aa930264f8b5a49f19322b7d7a36322ba23e9cf1a37a86bb7aa35f00a638edff"} Feb 18 01:29:12 crc kubenswrapper[4791]: I0218 01:29:12.709230 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wkvq7" event={"ID":"18cb0170-440e-4fee-98c5-21ccabd2e958","Type":"ContainerDied","Data":"9a41de4fa94680e2d2abe630cdd677cd541daa0a3281c414c058ae21ef4ec2f2"} Feb 18 01:29:12 crc kubenswrapper[4791]: I0218 01:29:12.709260 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wkvq7" Feb 18 01:29:12 crc kubenswrapper[4791]: I0218 01:29:12.709279 4791 scope.go:117] "RemoveContainer" containerID="aa930264f8b5a49f19322b7d7a36322ba23e9cf1a37a86bb7aa35f00a638edff" Feb 18 01:29:12 crc kubenswrapper[4791]: I0218 01:29:12.739509 4791 scope.go:117] "RemoveContainer" containerID="81217a5307a20c673f1bceb323423610aa2ca61b54f5784f09a8e36f2ec05dd4" Feb 18 01:29:12 crc kubenswrapper[4791]: I0218 01:29:12.771827 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wkvq7"] Feb 18 01:29:12 crc kubenswrapper[4791]: I0218 01:29:12.782565 4791 scope.go:117] "RemoveContainer" containerID="06b28cd5f3215d7407188b90af7bbeaf3c179f91c674eb0880f3b5561c3802ef" Feb 18 01:29:12 crc kubenswrapper[4791]: I0218 01:29:12.794195 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-wkvq7"] Feb 18 01:29:12 crc kubenswrapper[4791]: I0218 01:29:12.826877 4791 scope.go:117] "RemoveContainer" containerID="aa930264f8b5a49f19322b7d7a36322ba23e9cf1a37a86bb7aa35f00a638edff" Feb 18 01:29:12 crc kubenswrapper[4791]: E0218 01:29:12.827640 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aa930264f8b5a49f19322b7d7a36322ba23e9cf1a37a86bb7aa35f00a638edff\": container with ID starting with aa930264f8b5a49f19322b7d7a36322ba23e9cf1a37a86bb7aa35f00a638edff not found: ID does not exist" containerID="aa930264f8b5a49f19322b7d7a36322ba23e9cf1a37a86bb7aa35f00a638edff" Feb 18 01:29:12 crc kubenswrapper[4791]: I0218 01:29:12.827681 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aa930264f8b5a49f19322b7d7a36322ba23e9cf1a37a86bb7aa35f00a638edff"} err="failed to get container status \"aa930264f8b5a49f19322b7d7a36322ba23e9cf1a37a86bb7aa35f00a638edff\": rpc error: code = NotFound desc = could not find container \"aa930264f8b5a49f19322b7d7a36322ba23e9cf1a37a86bb7aa35f00a638edff\": container with ID starting with aa930264f8b5a49f19322b7d7a36322ba23e9cf1a37a86bb7aa35f00a638edff not found: ID does not exist" Feb 18 01:29:12 crc kubenswrapper[4791]: I0218 01:29:12.827708 4791 scope.go:117] "RemoveContainer" containerID="81217a5307a20c673f1bceb323423610aa2ca61b54f5784f09a8e36f2ec05dd4" Feb 18 01:29:12 crc kubenswrapper[4791]: E0218 01:29:12.828196 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"81217a5307a20c673f1bceb323423610aa2ca61b54f5784f09a8e36f2ec05dd4\": container with ID starting with 81217a5307a20c673f1bceb323423610aa2ca61b54f5784f09a8e36f2ec05dd4 not found: ID does not exist" containerID="81217a5307a20c673f1bceb323423610aa2ca61b54f5784f09a8e36f2ec05dd4" Feb 18 01:29:12 crc kubenswrapper[4791]: I0218 01:29:12.828260 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"81217a5307a20c673f1bceb323423610aa2ca61b54f5784f09a8e36f2ec05dd4"} err="failed to get container status \"81217a5307a20c673f1bceb323423610aa2ca61b54f5784f09a8e36f2ec05dd4\": rpc error: code = NotFound desc = could not find container \"81217a5307a20c673f1bceb323423610aa2ca61b54f5784f09a8e36f2ec05dd4\": container with ID starting with 81217a5307a20c673f1bceb323423610aa2ca61b54f5784f09a8e36f2ec05dd4 not found: ID does not exist" Feb 18 01:29:12 crc kubenswrapper[4791]: I0218 01:29:12.828303 4791 scope.go:117] "RemoveContainer" 
containerID="06b28cd5f3215d7407188b90af7bbeaf3c179f91c674eb0880f3b5561c3802ef" Feb 18 01:29:12 crc kubenswrapper[4791]: E0218 01:29:12.828715 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"06b28cd5f3215d7407188b90af7bbeaf3c179f91c674eb0880f3b5561c3802ef\": container with ID starting with 06b28cd5f3215d7407188b90af7bbeaf3c179f91c674eb0880f3b5561c3802ef not found: ID does not exist" containerID="06b28cd5f3215d7407188b90af7bbeaf3c179f91c674eb0880f3b5561c3802ef" Feb 18 01:29:12 crc kubenswrapper[4791]: I0218 01:29:12.828749 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"06b28cd5f3215d7407188b90af7bbeaf3c179f91c674eb0880f3b5561c3802ef"} err="failed to get container status \"06b28cd5f3215d7407188b90af7bbeaf3c179f91c674eb0880f3b5561c3802ef\": rpc error: code = NotFound desc = could not find container \"06b28cd5f3215d7407188b90af7bbeaf3c179f91c674eb0880f3b5561c3802ef\": container with ID starting with 06b28cd5f3215d7407188b90af7bbeaf3c179f91c674eb0880f3b5561c3802ef not found: ID does not exist" Feb 18 01:29:13 crc kubenswrapper[4791]: I0218 01:29:13.077399 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="18cb0170-440e-4fee-98c5-21ccabd2e958" path="/var/lib/kubelet/pods/18cb0170-440e-4fee-98c5-21ccabd2e958/volumes" Feb 18 01:29:16 crc kubenswrapper[4791]: E0218 01:29:16.064681 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:29:20 crc kubenswrapper[4791]: I0218 01:29:20.061839 4791 scope.go:117] "RemoveContainer" containerID="3cd1bc8e681afde002094b7f387db47144e26b7fef30167b74d341dbe40fb0a5" Feb 18 01:29:20 crc kubenswrapper[4791]: E0218 01:29:20.062497 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:29:24 crc kubenswrapper[4791]: E0218 01:29:24.064703 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:29:29 crc kubenswrapper[4791]: E0218 01:29:29.071722 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:29:34 crc kubenswrapper[4791]: I0218 01:29:34.061095 4791 scope.go:117] "RemoveContainer" containerID="3cd1bc8e681afde002094b7f387db47144e26b7fef30167b74d341dbe40fb0a5" Feb 18 01:29:34 crc kubenswrapper[4791]: E0218 01:29:34.062018 4791 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:29:37 crc kubenswrapper[4791]: E0218 01:29:37.064691 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:29:42 crc kubenswrapper[4791]: E0218 01:29:42.063361 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:29:45 crc kubenswrapper[4791]: I0218 01:29:45.061818 4791 scope.go:117] "RemoveContainer" containerID="3cd1bc8e681afde002094b7f387db47144e26b7fef30167b74d341dbe40fb0a5" Feb 18 01:29:45 crc kubenswrapper[4791]: E0218 01:29:45.062697 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:29:47 crc kubenswrapper[4791]: I0218 01:29:47.281546 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-c4dkx"] Feb 18 01:29:47 crc kubenswrapper[4791]: E0218 01:29:47.285738 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18cb0170-440e-4fee-98c5-21ccabd2e958" containerName="registry-server" Feb 18 01:29:47 crc kubenswrapper[4791]: I0218 01:29:47.285780 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="18cb0170-440e-4fee-98c5-21ccabd2e958" containerName="registry-server" Feb 18 01:29:47 crc kubenswrapper[4791]: E0218 01:29:47.285796 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18cb0170-440e-4fee-98c5-21ccabd2e958" containerName="extract-content" Feb 18 01:29:47 crc kubenswrapper[4791]: I0218 01:29:47.285802 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="18cb0170-440e-4fee-98c5-21ccabd2e958" containerName="extract-content" Feb 18 01:29:47 crc kubenswrapper[4791]: E0218 01:29:47.285827 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18cb0170-440e-4fee-98c5-21ccabd2e958" containerName="extract-utilities" Feb 18 01:29:47 crc kubenswrapper[4791]: I0218 01:29:47.285833 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="18cb0170-440e-4fee-98c5-21ccabd2e958" containerName="extract-utilities" Feb 18 01:29:47 crc kubenswrapper[4791]: I0218 01:29:47.286150 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="18cb0170-440e-4fee-98c5-21ccabd2e958" containerName="registry-server" Feb 18 01:29:47 crc kubenswrapper[4791]: I0218 01:29:47.287931 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-c4dkx" Feb 18 01:29:47 crc kubenswrapper[4791]: I0218 01:29:47.309600 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-c4dkx"] Feb 18 01:29:47 crc kubenswrapper[4791]: I0218 01:29:47.377304 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78-catalog-content\") pod \"redhat-operators-c4dkx\" (UID: \"93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78\") " pod="openshift-marketplace/redhat-operators-c4dkx" Feb 18 01:29:47 crc kubenswrapper[4791]: I0218 01:29:47.378798 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78-utilities\") pod \"redhat-operators-c4dkx\" (UID: \"93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78\") " pod="openshift-marketplace/redhat-operators-c4dkx" Feb 18 01:29:47 crc kubenswrapper[4791]: I0218 01:29:47.379225 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9nngq\" (UniqueName: \"kubernetes.io/projected/93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78-kube-api-access-9nngq\") pod \"redhat-operators-c4dkx\" (UID: \"93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78\") " pod="openshift-marketplace/redhat-operators-c4dkx" Feb 18 01:29:47 crc kubenswrapper[4791]: I0218 01:29:47.480634 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78-catalog-content\") pod \"redhat-operators-c4dkx\" (UID: \"93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78\") " pod="openshift-marketplace/redhat-operators-c4dkx" Feb 18 01:29:47 crc kubenswrapper[4791]: I0218 01:29:47.480932 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78-utilities\") pod \"redhat-operators-c4dkx\" (UID: \"93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78\") " pod="openshift-marketplace/redhat-operators-c4dkx" Feb 18 01:29:47 crc kubenswrapper[4791]: I0218 01:29:47.481107 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9nngq\" (UniqueName: \"kubernetes.io/projected/93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78-kube-api-access-9nngq\") pod \"redhat-operators-c4dkx\" (UID: \"93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78\") " pod="openshift-marketplace/redhat-operators-c4dkx" Feb 18 01:29:47 crc kubenswrapper[4791]: I0218 01:29:47.481194 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78-catalog-content\") pod \"redhat-operators-c4dkx\" (UID: \"93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78\") " pod="openshift-marketplace/redhat-operators-c4dkx" Feb 18 01:29:47 crc kubenswrapper[4791]: I0218 01:29:47.481329 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78-utilities\") pod \"redhat-operators-c4dkx\" (UID: \"93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78\") " pod="openshift-marketplace/redhat-operators-c4dkx" Feb 18 01:29:47 crc kubenswrapper[4791]: I0218 01:29:47.506194 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-9nngq\" (UniqueName: \"kubernetes.io/projected/93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78-kube-api-access-9nngq\") pod \"redhat-operators-c4dkx\" (UID: \"93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78\") " pod="openshift-marketplace/redhat-operators-c4dkx" Feb 18 01:29:47 crc kubenswrapper[4791]: I0218 01:29:47.636136 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c4dkx" Feb 18 01:29:48 crc kubenswrapper[4791]: I0218 01:29:48.190050 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-c4dkx"] Feb 18 01:29:49 crc kubenswrapper[4791]: I0218 01:29:49.144902 4791 generic.go:334] "Generic (PLEG): container finished" podID="93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78" containerID="07435efd4de012a45bee1cb5ae6dfc9db7908dc31dbc3778f91dec80b97a28c9" exitCode=0 Feb 18 01:29:49 crc kubenswrapper[4791]: I0218 01:29:49.145139 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c4dkx" event={"ID":"93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78","Type":"ContainerDied","Data":"07435efd4de012a45bee1cb5ae6dfc9db7908dc31dbc3778f91dec80b97a28c9"} Feb 18 01:29:49 crc kubenswrapper[4791]: I0218 01:29:49.145461 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c4dkx" event={"ID":"93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78","Type":"ContainerStarted","Data":"38d51b6c2d05de903e85ca2c7560e5e860cf68b13cb21f25061c134048f5f93b"} Feb 18 01:29:50 crc kubenswrapper[4791]: E0218 01:29:50.062882 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:29:50 crc kubenswrapper[4791]: I0218 01:29:50.162744 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c4dkx" event={"ID":"93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78","Type":"ContainerStarted","Data":"53b0ae91c4cd1b5dec6d4405fd02e41724a1a7b269f5621008ed4d3391b10ee1"} Feb 18 01:29:53 crc kubenswrapper[4791]: E0218 01:29:53.063927 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:29:55 crc kubenswrapper[4791]: I0218 01:29:55.220330 4791 generic.go:334] "Generic (PLEG): container finished" podID="93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78" containerID="53b0ae91c4cd1b5dec6d4405fd02e41724a1a7b269f5621008ed4d3391b10ee1" exitCode=0 Feb 18 01:29:55 crc kubenswrapper[4791]: I0218 01:29:55.220408 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c4dkx" event={"ID":"93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78","Type":"ContainerDied","Data":"53b0ae91c4cd1b5dec6d4405fd02e41724a1a7b269f5621008ed4d3391b10ee1"} Feb 18 01:29:56 crc kubenswrapper[4791]: I0218 01:29:56.235965 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c4dkx" event={"ID":"93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78","Type":"ContainerStarted","Data":"5e3194af88827a7b8e9a4a6ca3afdba4542427b9dbbcdbb08ededb47c4083852"} Feb 18 01:29:56 crc 
kubenswrapper[4791]: I0218 01:29:56.257452 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-c4dkx" podStartSLOduration=2.768018958 podStartE2EDuration="9.257435848s" podCreationTimestamp="2026-02-18 01:29:47 +0000 UTC" firstStartedPulling="2026-02-18 01:29:49.14775386 +0000 UTC m=+3330.715767040" lastFinishedPulling="2026-02-18 01:29:55.63717075 +0000 UTC m=+3337.205183930" observedRunningTime="2026-02-18 01:29:56.253018891 +0000 UTC m=+3337.821032071" watchObservedRunningTime="2026-02-18 01:29:56.257435848 +0000 UTC m=+3337.825449018" Feb 18 01:29:57 crc kubenswrapper[4791]: I0218 01:29:57.636452 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-c4dkx" Feb 18 01:29:57 crc kubenswrapper[4791]: I0218 01:29:57.636852 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-c4dkx" Feb 18 01:29:58 crc kubenswrapper[4791]: I0218 01:29:58.061116 4791 scope.go:117] "RemoveContainer" containerID="3cd1bc8e681afde002094b7f387db47144e26b7fef30167b74d341dbe40fb0a5" Feb 18 01:29:58 crc kubenswrapper[4791]: E0218 01:29:58.061462 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:29:58 crc kubenswrapper[4791]: I0218 01:29:58.681007 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-c4dkx" podUID="93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78" containerName="registry-server" probeResult="failure" output=< Feb 18 01:29:58 crc kubenswrapper[4791]: timeout: failed to connect service ":50051" within 1s Feb 18 01:29:58 crc kubenswrapper[4791]: > Feb 18 01:30:00 crc kubenswrapper[4791]: I0218 01:30:00.172709 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29522970-zcpdg"] Feb 18 01:30:00 crc kubenswrapper[4791]: I0218 01:30:00.174847 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29522970-zcpdg" Feb 18 01:30:00 crc kubenswrapper[4791]: I0218 01:30:00.178645 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Feb 18 01:30:00 crc kubenswrapper[4791]: I0218 01:30:00.178991 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Feb 18 01:30:00 crc kubenswrapper[4791]: I0218 01:30:00.183525 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29522970-zcpdg"] Feb 18 01:30:00 crc kubenswrapper[4791]: I0218 01:30:00.308486 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gd8mf\" (UniqueName: \"kubernetes.io/projected/33486714-6606-48b5-abc5-7f8730631c19-kube-api-access-gd8mf\") pod \"collect-profiles-29522970-zcpdg\" (UID: \"33486714-6606-48b5-abc5-7f8730631c19\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522970-zcpdg" Feb 18 01:30:00 crc kubenswrapper[4791]: I0218 01:30:00.308906 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/33486714-6606-48b5-abc5-7f8730631c19-secret-volume\") pod \"collect-profiles-29522970-zcpdg\" (UID: \"33486714-6606-48b5-abc5-7f8730631c19\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522970-zcpdg" Feb 18 01:30:00 crc kubenswrapper[4791]: I0218 01:30:00.308995 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/33486714-6606-48b5-abc5-7f8730631c19-config-volume\") pod \"collect-profiles-29522970-zcpdg\" (UID: \"33486714-6606-48b5-abc5-7f8730631c19\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522970-zcpdg" Feb 18 01:30:00 crc kubenswrapper[4791]: I0218 01:30:00.411890 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/33486714-6606-48b5-abc5-7f8730631c19-secret-volume\") pod \"collect-profiles-29522970-zcpdg\" (UID: \"33486714-6606-48b5-abc5-7f8730631c19\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522970-zcpdg" Feb 18 01:30:00 crc kubenswrapper[4791]: I0218 01:30:00.411941 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/33486714-6606-48b5-abc5-7f8730631c19-config-volume\") pod \"collect-profiles-29522970-zcpdg\" (UID: \"33486714-6606-48b5-abc5-7f8730631c19\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522970-zcpdg" Feb 18 01:30:00 crc kubenswrapper[4791]: I0218 01:30:00.412064 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gd8mf\" (UniqueName: \"kubernetes.io/projected/33486714-6606-48b5-abc5-7f8730631c19-kube-api-access-gd8mf\") pod \"collect-profiles-29522970-zcpdg\" (UID: \"33486714-6606-48b5-abc5-7f8730631c19\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522970-zcpdg" Feb 18 01:30:00 crc kubenswrapper[4791]: I0218 01:30:00.413020 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/33486714-6606-48b5-abc5-7f8730631c19-config-volume\") pod 
\"collect-profiles-29522970-zcpdg\" (UID: \"33486714-6606-48b5-abc5-7f8730631c19\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522970-zcpdg" Feb 18 01:30:00 crc kubenswrapper[4791]: I0218 01:30:00.421062 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/33486714-6606-48b5-abc5-7f8730631c19-secret-volume\") pod \"collect-profiles-29522970-zcpdg\" (UID: \"33486714-6606-48b5-abc5-7f8730631c19\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522970-zcpdg" Feb 18 01:30:00 crc kubenswrapper[4791]: I0218 01:30:00.430287 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gd8mf\" (UniqueName: \"kubernetes.io/projected/33486714-6606-48b5-abc5-7f8730631c19-kube-api-access-gd8mf\") pod \"collect-profiles-29522970-zcpdg\" (UID: \"33486714-6606-48b5-abc5-7f8730631c19\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522970-zcpdg" Feb 18 01:30:00 crc kubenswrapper[4791]: I0218 01:30:00.524364 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29522970-zcpdg" Feb 18 01:30:01 crc kubenswrapper[4791]: I0218 01:30:01.121503 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29522970-zcpdg"] Feb 18 01:30:01 crc kubenswrapper[4791]: W0218 01:30:01.126678 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod33486714_6606_48b5_abc5_7f8730631c19.slice/crio-2cc6431fcea04af0d7106a01cfca7658de622dd661876b5d8eb40ecccdd810b8 WatchSource:0}: Error finding container 2cc6431fcea04af0d7106a01cfca7658de622dd661876b5d8eb40ecccdd810b8: Status 404 returned error can't find the container with id 2cc6431fcea04af0d7106a01cfca7658de622dd661876b5d8eb40ecccdd810b8 Feb 18 01:30:01 crc kubenswrapper[4791]: I0218 01:30:01.284080 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29522970-zcpdg" event={"ID":"33486714-6606-48b5-abc5-7f8730631c19","Type":"ContainerStarted","Data":"2cc6431fcea04af0d7106a01cfca7658de622dd661876b5d8eb40ecccdd810b8"} Feb 18 01:30:02 crc kubenswrapper[4791]: I0218 01:30:02.294558 4791 generic.go:334] "Generic (PLEG): container finished" podID="33486714-6606-48b5-abc5-7f8730631c19" containerID="5fcf36ad0b3b870e11fb505cd77e78d616535a4cd244b3fdf3c01949bf52525b" exitCode=0 Feb 18 01:30:02 crc kubenswrapper[4791]: I0218 01:30:02.294602 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29522970-zcpdg" event={"ID":"33486714-6606-48b5-abc5-7f8730631c19","Type":"ContainerDied","Data":"5fcf36ad0b3b870e11fb505cd77e78d616535a4cd244b3fdf3c01949bf52525b"} Feb 18 01:30:03 crc kubenswrapper[4791]: I0218 01:30:03.720053 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29522970-zcpdg" Feb 18 01:30:03 crc kubenswrapper[4791]: I0218 01:30:03.913023 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/33486714-6606-48b5-abc5-7f8730631c19-secret-volume\") pod \"33486714-6606-48b5-abc5-7f8730631c19\" (UID: \"33486714-6606-48b5-abc5-7f8730631c19\") " Feb 18 01:30:03 crc kubenswrapper[4791]: I0218 01:30:03.913257 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gd8mf\" (UniqueName: \"kubernetes.io/projected/33486714-6606-48b5-abc5-7f8730631c19-kube-api-access-gd8mf\") pod \"33486714-6606-48b5-abc5-7f8730631c19\" (UID: \"33486714-6606-48b5-abc5-7f8730631c19\") " Feb 18 01:30:03 crc kubenswrapper[4791]: I0218 01:30:03.913332 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/33486714-6606-48b5-abc5-7f8730631c19-config-volume\") pod \"33486714-6606-48b5-abc5-7f8730631c19\" (UID: \"33486714-6606-48b5-abc5-7f8730631c19\") " Feb 18 01:30:03 crc kubenswrapper[4791]: I0218 01:30:03.914286 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33486714-6606-48b5-abc5-7f8730631c19-config-volume" (OuterVolumeSpecName: "config-volume") pod "33486714-6606-48b5-abc5-7f8730631c19" (UID: "33486714-6606-48b5-abc5-7f8730631c19"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 01:30:03 crc kubenswrapper[4791]: I0218 01:30:03.920547 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33486714-6606-48b5-abc5-7f8730631c19-kube-api-access-gd8mf" (OuterVolumeSpecName: "kube-api-access-gd8mf") pod "33486714-6606-48b5-abc5-7f8730631c19" (UID: "33486714-6606-48b5-abc5-7f8730631c19"). InnerVolumeSpecName "kube-api-access-gd8mf". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:30:03 crc kubenswrapper[4791]: I0218 01:30:03.924683 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33486714-6606-48b5-abc5-7f8730631c19-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "33486714-6606-48b5-abc5-7f8730631c19" (UID: "33486714-6606-48b5-abc5-7f8730631c19"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:30:04 crc kubenswrapper[4791]: I0218 01:30:04.016889 4791 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/33486714-6606-48b5-abc5-7f8730631c19-secret-volume\") on node \"crc\" DevicePath \"\"" Feb 18 01:30:04 crc kubenswrapper[4791]: I0218 01:30:04.016928 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gd8mf\" (UniqueName: \"kubernetes.io/projected/33486714-6606-48b5-abc5-7f8730631c19-kube-api-access-gd8mf\") on node \"crc\" DevicePath \"\"" Feb 18 01:30:04 crc kubenswrapper[4791]: I0218 01:30:04.016939 4791 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/33486714-6606-48b5-abc5-7f8730631c19-config-volume\") on node \"crc\" DevicePath \"\"" Feb 18 01:30:04 crc kubenswrapper[4791]: E0218 01:30:04.063292 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:30:04 crc kubenswrapper[4791]: I0218 01:30:04.314069 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29522970-zcpdg" event={"ID":"33486714-6606-48b5-abc5-7f8730631c19","Type":"ContainerDied","Data":"2cc6431fcea04af0d7106a01cfca7658de622dd661876b5d8eb40ecccdd810b8"} Feb 18 01:30:04 crc kubenswrapper[4791]: I0218 01:30:04.314108 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2cc6431fcea04af0d7106a01cfca7658de622dd661876b5d8eb40ecccdd810b8" Feb 18 01:30:04 crc kubenswrapper[4791]: I0218 01:30:04.314165 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29522970-zcpdg" Feb 18 01:30:04 crc kubenswrapper[4791]: I0218 01:30:04.802998 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29522925-xkcgp"] Feb 18 01:30:04 crc kubenswrapper[4791]: I0218 01:30:04.816519 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29522925-xkcgp"] Feb 18 01:30:05 crc kubenswrapper[4791]: I0218 01:30:05.086812 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="306b19b9-f9da-46ef-b791-32fa75a9e66d" path="/var/lib/kubelet/pods/306b19b9-f9da-46ef-b791-32fa75a9e66d/volumes" Feb 18 01:30:06 crc kubenswrapper[4791]: E0218 01:30:06.063316 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:30:07 crc kubenswrapper[4791]: I0218 01:30:07.702876 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-c4dkx" Feb 18 01:30:07 crc kubenswrapper[4791]: I0218 01:30:07.762374 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-c4dkx" Feb 18 01:30:10 crc kubenswrapper[4791]: I0218 01:30:10.058087 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-c4dkx"] Feb 18 01:30:10 crc kubenswrapper[4791]: I0218 01:30:10.059400 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-c4dkx" podUID="93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78" containerName="registry-server" containerID="cri-o://5e3194af88827a7b8e9a4a6ca3afdba4542427b9dbbcdbb08ededb47c4083852" gracePeriod=2 Feb 18 01:30:10 crc kubenswrapper[4791]: I0218 01:30:10.416147 4791 generic.go:334] "Generic (PLEG): container finished" podID="93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78" containerID="5e3194af88827a7b8e9a4a6ca3afdba4542427b9dbbcdbb08ededb47c4083852" exitCode=0 Feb 18 01:30:10 crc kubenswrapper[4791]: I0218 01:30:10.417098 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c4dkx" event={"ID":"93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78","Type":"ContainerDied","Data":"5e3194af88827a7b8e9a4a6ca3afdba4542427b9dbbcdbb08ededb47c4083852"} Feb 18 01:30:10 crc kubenswrapper[4791]: I0218 01:30:10.617093 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-c4dkx" Feb 18 01:30:10 crc kubenswrapper[4791]: I0218 01:30:10.679679 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78-catalog-content\") pod \"93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78\" (UID: \"93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78\") " Feb 18 01:30:10 crc kubenswrapper[4791]: I0218 01:30:10.680024 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78-utilities\") pod \"93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78\" (UID: \"93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78\") " Feb 18 01:30:10 crc kubenswrapper[4791]: I0218 01:30:10.680133 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9nngq\" (UniqueName: \"kubernetes.io/projected/93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78-kube-api-access-9nngq\") pod \"93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78\" (UID: \"93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78\") " Feb 18 01:30:10 crc kubenswrapper[4791]: I0218 01:30:10.680822 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78-utilities" (OuterVolumeSpecName: "utilities") pod "93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78" (UID: "93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:30:10 crc kubenswrapper[4791]: I0218 01:30:10.685856 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78-kube-api-access-9nngq" (OuterVolumeSpecName: "kube-api-access-9nngq") pod "93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78" (UID: "93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78"). InnerVolumeSpecName "kube-api-access-9nngq". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:30:10 crc kubenswrapper[4791]: I0218 01:30:10.783381 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 01:30:10 crc kubenswrapper[4791]: I0218 01:30:10.783420 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9nngq\" (UniqueName: \"kubernetes.io/projected/93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78-kube-api-access-9nngq\") on node \"crc\" DevicePath \"\"" Feb 18 01:30:10 crc kubenswrapper[4791]: I0218 01:30:10.812680 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78" (UID: "93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:30:10 crc kubenswrapper[4791]: I0218 01:30:10.885529 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 01:30:11 crc kubenswrapper[4791]: I0218 01:30:11.061632 4791 scope.go:117] "RemoveContainer" containerID="3cd1bc8e681afde002094b7f387db47144e26b7fef30167b74d341dbe40fb0a5" Feb 18 01:30:11 crc kubenswrapper[4791]: E0218 01:30:11.062047 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:30:11 crc kubenswrapper[4791]: I0218 01:30:11.431231 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c4dkx" event={"ID":"93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78","Type":"ContainerDied","Data":"38d51b6c2d05de903e85ca2c7560e5e860cf68b13cb21f25061c134048f5f93b"} Feb 18 01:30:11 crc kubenswrapper[4791]: I0218 01:30:11.431288 4791 scope.go:117] "RemoveContainer" containerID="5e3194af88827a7b8e9a4a6ca3afdba4542427b9dbbcdbb08ededb47c4083852" Feb 18 01:30:11 crc kubenswrapper[4791]: I0218 01:30:11.431562 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c4dkx" Feb 18 01:30:11 crc kubenswrapper[4791]: I0218 01:30:11.465297 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-c4dkx"] Feb 18 01:30:11 crc kubenswrapper[4791]: I0218 01:30:11.465324 4791 scope.go:117] "RemoveContainer" containerID="53b0ae91c4cd1b5dec6d4405fd02e41724a1a7b269f5621008ed4d3391b10ee1" Feb 18 01:30:11 crc kubenswrapper[4791]: I0218 01:30:11.482313 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-c4dkx"] Feb 18 01:30:11 crc kubenswrapper[4791]: I0218 01:30:11.525604 4791 scope.go:117] "RemoveContainer" containerID="07435efd4de012a45bee1cb5ae6dfc9db7908dc31dbc3778f91dec80b97a28c9" Feb 18 01:30:13 crc kubenswrapper[4791]: I0218 01:30:13.073421 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78" path="/var/lib/kubelet/pods/93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78/volumes" Feb 18 01:30:16 crc kubenswrapper[4791]: E0218 01:30:16.063177 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:30:19 crc kubenswrapper[4791]: E0218 01:30:19.062517 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:30:26 crc kubenswrapper[4791]: I0218 01:30:26.061474 4791 scope.go:117] "RemoveContainer" 
containerID="3cd1bc8e681afde002094b7f387db47144e26b7fef30167b74d341dbe40fb0a5" Feb 18 01:30:26 crc kubenswrapper[4791]: E0218 01:30:26.062260 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:30:30 crc kubenswrapper[4791]: E0218 01:30:30.064881 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:30:31 crc kubenswrapper[4791]: E0218 01:30:31.066248 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:30:33 crc kubenswrapper[4791]: I0218 01:30:33.784719 4791 scope.go:117] "RemoveContainer" containerID="6e840fe0a4351f09c2d46c9a0e057f7caa293f3229e78604dfc2dbe0cf0b908f" Feb 18 01:30:38 crc kubenswrapper[4791]: I0218 01:30:38.062986 4791 scope.go:117] "RemoveContainer" containerID="3cd1bc8e681afde002094b7f387db47144e26b7fef30167b74d341dbe40fb0a5" Feb 18 01:30:38 crc kubenswrapper[4791]: E0218 01:30:38.063985 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:30:42 crc kubenswrapper[4791]: E0218 01:30:42.064909 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:30:44 crc kubenswrapper[4791]: E0218 01:30:44.064567 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:30:53 crc kubenswrapper[4791]: I0218 01:30:53.062246 4791 scope.go:117] "RemoveContainer" containerID="3cd1bc8e681afde002094b7f387db47144e26b7fef30167b74d341dbe40fb0a5" Feb 18 01:30:53 crc kubenswrapper[4791]: E0218 01:30:53.063768 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:30:53 crc kubenswrapper[4791]: E0218 01:30:53.067507 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:30:59 crc kubenswrapper[4791]: E0218 01:30:59.073249 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:31:04 crc kubenswrapper[4791]: I0218 01:31:04.061134 4791 scope.go:117] "RemoveContainer" containerID="3cd1bc8e681afde002094b7f387db47144e26b7fef30167b74d341dbe40fb0a5" Feb 18 01:31:04 crc kubenswrapper[4791]: E0218 01:31:04.061930 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:31:04 crc kubenswrapper[4791]: E0218 01:31:04.065457 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:31:13 crc kubenswrapper[4791]: E0218 01:31:13.065603 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:31:17 crc kubenswrapper[4791]: I0218 01:31:17.062234 4791 scope.go:117] "RemoveContainer" containerID="3cd1bc8e681afde002094b7f387db47144e26b7fef30167b74d341dbe40fb0a5" Feb 18 01:31:17 crc kubenswrapper[4791]: E0218 01:31:17.063403 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:31:18 crc kubenswrapper[4791]: E0218 01:31:18.063591 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" 
podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:31:28 crc kubenswrapper[4791]: I0218 01:31:28.061726 4791 scope.go:117] "RemoveContainer" containerID="3cd1bc8e681afde002094b7f387db47144e26b7fef30167b74d341dbe40fb0a5" Feb 18 01:31:28 crc kubenswrapper[4791]: E0218 01:31:28.063685 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:31:28 crc kubenswrapper[4791]: I0218 01:31:28.514764 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerStarted","Data":"98dff6a03f3550c58e5b26455aa79fc869a37bdfd0b430a04ca162d0fc043c5e"} Feb 18 01:31:29 crc kubenswrapper[4791]: E0218 01:31:29.071194 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:31:42 crc kubenswrapper[4791]: E0218 01:31:42.065103 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:31:43 crc kubenswrapper[4791]: E0218 01:31:43.067269 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:31:55 crc kubenswrapper[4791]: E0218 01:31:55.064309 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:31:57 crc kubenswrapper[4791]: E0218 01:31:57.063872 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:32:09 crc kubenswrapper[4791]: E0218 01:32:09.071785 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:32:10 crc kubenswrapper[4791]: I0218 01:32:10.065015 4791 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 
18 01:32:10 crc kubenswrapper[4791]: E0218 01:32:10.173805 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 01:32:10 crc kubenswrapper[4791]: E0218 01:32:10.173863 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 01:32:10 crc kubenswrapper[4791]: E0218 01:32:10.174190 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lgn7d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-scphk_openstack(68e5e8d6-5771-4045-858a-4a39b2db99f9): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or 
has expired. To pull, revive via time machine" logger="UnhandledError" Feb 18 01:32:10 crc kubenswrapper[4791]: E0218 01:32:10.175383 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:32:14 crc kubenswrapper[4791]: I0218 01:32:14.307448 4791 generic.go:334] "Generic (PLEG): container finished" podID="2bee1000-5b84-4271-9e51-adb4f12eaadb" containerID="785350efd1039a12488b9f589c2141c62ac21852fdffa2b3dea38fc3cbc23a94" exitCode=2 Feb 18 01:32:14 crc kubenswrapper[4791]: I0218 01:32:14.307572 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vs565" event={"ID":"2bee1000-5b84-4271-9e51-adb4f12eaadb","Type":"ContainerDied","Data":"785350efd1039a12488b9f589c2141c62ac21852fdffa2b3dea38fc3cbc23a94"} Feb 18 01:32:15 crc kubenswrapper[4791]: I0218 01:32:15.880347 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vs565" Feb 18 01:32:15 crc kubenswrapper[4791]: I0218 01:32:15.905703 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2bee1000-5b84-4271-9e51-adb4f12eaadb-inventory\") pod \"2bee1000-5b84-4271-9e51-adb4f12eaadb\" (UID: \"2bee1000-5b84-4271-9e51-adb4f12eaadb\") " Feb 18 01:32:15 crc kubenswrapper[4791]: I0218 01:32:15.905880 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2bee1000-5b84-4271-9e51-adb4f12eaadb-ssh-key-openstack-edpm-ipam\") pod \"2bee1000-5b84-4271-9e51-adb4f12eaadb\" (UID: \"2bee1000-5b84-4271-9e51-adb4f12eaadb\") " Feb 18 01:32:15 crc kubenswrapper[4791]: I0218 01:32:15.906137 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4znd5\" (UniqueName: \"kubernetes.io/projected/2bee1000-5b84-4271-9e51-adb4f12eaadb-kube-api-access-4znd5\") pod \"2bee1000-5b84-4271-9e51-adb4f12eaadb\" (UID: \"2bee1000-5b84-4271-9e51-adb4f12eaadb\") " Feb 18 01:32:15 crc kubenswrapper[4791]: I0218 01:32:15.911956 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2bee1000-5b84-4271-9e51-adb4f12eaadb-kube-api-access-4znd5" (OuterVolumeSpecName: "kube-api-access-4znd5") pod "2bee1000-5b84-4271-9e51-adb4f12eaadb" (UID: "2bee1000-5b84-4271-9e51-adb4f12eaadb"). InnerVolumeSpecName "kube-api-access-4znd5". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:32:15 crc kubenswrapper[4791]: I0218 01:32:15.944796 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2bee1000-5b84-4271-9e51-adb4f12eaadb-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "2bee1000-5b84-4271-9e51-adb4f12eaadb" (UID: "2bee1000-5b84-4271-9e51-adb4f12eaadb"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:32:15 crc kubenswrapper[4791]: I0218 01:32:15.964165 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2bee1000-5b84-4271-9e51-adb4f12eaadb-inventory" (OuterVolumeSpecName: "inventory") pod "2bee1000-5b84-4271-9e51-adb4f12eaadb" (UID: "2bee1000-5b84-4271-9e51-adb4f12eaadb"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:32:16 crc kubenswrapper[4791]: I0218 01:32:16.009138 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4znd5\" (UniqueName: \"kubernetes.io/projected/2bee1000-5b84-4271-9e51-adb4f12eaadb-kube-api-access-4znd5\") on node \"crc\" DevicePath \"\"" Feb 18 01:32:16 crc kubenswrapper[4791]: I0218 01:32:16.009182 4791 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2bee1000-5b84-4271-9e51-adb4f12eaadb-inventory\") on node \"crc\" DevicePath \"\"" Feb 18 01:32:16 crc kubenswrapper[4791]: I0218 01:32:16.009192 4791 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2bee1000-5b84-4271-9e51-adb4f12eaadb-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Feb 18 01:32:16 crc kubenswrapper[4791]: I0218 01:32:16.335229 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vs565" event={"ID":"2bee1000-5b84-4271-9e51-adb4f12eaadb","Type":"ContainerDied","Data":"061752326fa3937dd89777450dabdee334ae8cbc3f130632be5bfc5203f1cf50"} Feb 18 01:32:16 crc kubenswrapper[4791]: I0218 01:32:16.335275 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="061752326fa3937dd89777450dabdee334ae8cbc3f130632be5bfc5203f1cf50" Feb 18 01:32:16 crc kubenswrapper[4791]: I0218 01:32:16.335290 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vs565" Feb 18 01:32:20 crc kubenswrapper[4791]: E0218 01:32:20.064871 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:32:24 crc kubenswrapper[4791]: E0218 01:32:24.063881 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:32:32 crc kubenswrapper[4791]: E0218 01:32:32.064357 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:32:35 crc kubenswrapper[4791]: E0218 01:32:35.064026 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:32:46 crc kubenswrapper[4791]: E0218 01:32:46.064687 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:32:46 crc kubenswrapper[4791]: E0218 01:32:46.064764 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:32:58 crc kubenswrapper[4791]: E0218 01:32:58.087265 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:32:59 crc kubenswrapper[4791]: E0218 01:32:59.227255 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 01:32:59 crc kubenswrapper[4791]: E0218 01:32:59.227914 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 01:32:59 crc kubenswrapper[4791]: E0218 01:32:59.228123 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndh699h566hcch79h59h595h58bhb6h7fh594h67fh546h668h56hc8h6chd5h94h599h587hddh64bh57bhc9h68fh7dh57bh65dh64fh68dh59dq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-np7gr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(4b9cec47-aeda-40f0-b83e-46f09ce65e95): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Feb 18 01:32:59 crc kubenswrapper[4791]: E0218 01:32:59.229415 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:33:10 crc kubenswrapper[4791]: E0218 01:33:10.063739 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:33:12 crc kubenswrapper[4791]: E0218 01:33:12.063247 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:33:23 crc kubenswrapper[4791]: E0218 01:33:23.064293 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:33:24 crc kubenswrapper[4791]: E0218 01:33:24.063623 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:33:33 crc kubenswrapper[4791]: I0218 01:33:33.086786 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-68jq5"] Feb 18 01:33:33 crc kubenswrapper[4791]: E0218 01:33:33.087979 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2bee1000-5b84-4271-9e51-adb4f12eaadb" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Feb 18 01:33:33 crc kubenswrapper[4791]: I0218 01:33:33.087993 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="2bee1000-5b84-4271-9e51-adb4f12eaadb" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Feb 18 01:33:33 crc kubenswrapper[4791]: E0218 01:33:33.088013 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33486714-6606-48b5-abc5-7f8730631c19" containerName="collect-profiles" Feb 18 01:33:33 crc kubenswrapper[4791]: I0218 01:33:33.088020 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="33486714-6606-48b5-abc5-7f8730631c19" containerName="collect-profiles" Feb 18 01:33:33 crc kubenswrapper[4791]: E0218 01:33:33.088038 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78" containerName="extract-content" Feb 18 01:33:33 crc 
kubenswrapper[4791]: I0218 01:33:33.088044 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78" containerName="extract-content" Feb 18 01:33:33 crc kubenswrapper[4791]: E0218 01:33:33.088070 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78" containerName="extract-utilities" Feb 18 01:33:33 crc kubenswrapper[4791]: I0218 01:33:33.088077 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78" containerName="extract-utilities" Feb 18 01:33:33 crc kubenswrapper[4791]: E0218 01:33:33.088085 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78" containerName="registry-server" Feb 18 01:33:33 crc kubenswrapper[4791]: I0218 01:33:33.088090 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78" containerName="registry-server" Feb 18 01:33:33 crc kubenswrapper[4791]: I0218 01:33:33.088336 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="93bda2ae-29b0-4b3b-b2d4-bd9fa5e5db78" containerName="registry-server" Feb 18 01:33:33 crc kubenswrapper[4791]: I0218 01:33:33.088350 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="33486714-6606-48b5-abc5-7f8730631c19" containerName="collect-profiles" Feb 18 01:33:33 crc kubenswrapper[4791]: I0218 01:33:33.088376 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="2bee1000-5b84-4271-9e51-adb4f12eaadb" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Feb 18 01:33:33 crc kubenswrapper[4791]: I0218 01:33:33.089088 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-68jq5"] Feb 18 01:33:33 crc kubenswrapper[4791]: I0218 01:33:33.089174 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-68jq5" Feb 18 01:33:33 crc kubenswrapper[4791]: I0218 01:33:33.091099 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-qjdk4" Feb 18 01:33:33 crc kubenswrapper[4791]: I0218 01:33:33.092034 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Feb 18 01:33:33 crc kubenswrapper[4791]: I0218 01:33:33.092738 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Feb 18 01:33:33 crc kubenswrapper[4791]: I0218 01:33:33.093142 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Feb 18 01:33:33 crc kubenswrapper[4791]: I0218 01:33:33.206023 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qfsp8\" (UniqueName: \"kubernetes.io/projected/4f1c775a-693d-40ae-b01c-00632b39e8b1-kube-api-access-qfsp8\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-68jq5\" (UID: \"4f1c775a-693d-40ae-b01c-00632b39e8b1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-68jq5" Feb 18 01:33:33 crc kubenswrapper[4791]: I0218 01:33:33.206300 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4f1c775a-693d-40ae-b01c-00632b39e8b1-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-68jq5\" (UID: \"4f1c775a-693d-40ae-b01c-00632b39e8b1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-68jq5" Feb 18 01:33:33 crc kubenswrapper[4791]: I0218 01:33:33.206363 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4f1c775a-693d-40ae-b01c-00632b39e8b1-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-68jq5\" (UID: \"4f1c775a-693d-40ae-b01c-00632b39e8b1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-68jq5" Feb 18 01:33:33 crc kubenswrapper[4791]: I0218 01:33:33.308599 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qfsp8\" (UniqueName: \"kubernetes.io/projected/4f1c775a-693d-40ae-b01c-00632b39e8b1-kube-api-access-qfsp8\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-68jq5\" (UID: \"4f1c775a-693d-40ae-b01c-00632b39e8b1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-68jq5" Feb 18 01:33:33 crc kubenswrapper[4791]: I0218 01:33:33.308728 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4f1c775a-693d-40ae-b01c-00632b39e8b1-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-68jq5\" (UID: \"4f1c775a-693d-40ae-b01c-00632b39e8b1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-68jq5" Feb 18 01:33:33 crc kubenswrapper[4791]: I0218 01:33:33.308762 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4f1c775a-693d-40ae-b01c-00632b39e8b1-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-68jq5\" (UID: \"4f1c775a-693d-40ae-b01c-00632b39e8b1\") " 
pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-68jq5" Feb 18 01:33:33 crc kubenswrapper[4791]: I0218 01:33:33.316469 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4f1c775a-693d-40ae-b01c-00632b39e8b1-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-68jq5\" (UID: \"4f1c775a-693d-40ae-b01c-00632b39e8b1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-68jq5" Feb 18 01:33:33 crc kubenswrapper[4791]: I0218 01:33:33.316475 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4f1c775a-693d-40ae-b01c-00632b39e8b1-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-68jq5\" (UID: \"4f1c775a-693d-40ae-b01c-00632b39e8b1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-68jq5" Feb 18 01:33:33 crc kubenswrapper[4791]: I0218 01:33:33.323447 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qfsp8\" (UniqueName: \"kubernetes.io/projected/4f1c775a-693d-40ae-b01c-00632b39e8b1-kube-api-access-qfsp8\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-68jq5\" (UID: \"4f1c775a-693d-40ae-b01c-00632b39e8b1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-68jq5" Feb 18 01:33:33 crc kubenswrapper[4791]: I0218 01:33:33.423989 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-68jq5" Feb 18 01:33:33 crc kubenswrapper[4791]: I0218 01:33:33.869765 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-68jq5"] Feb 18 01:33:34 crc kubenswrapper[4791]: E0218 01:33:34.063387 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:33:34 crc kubenswrapper[4791]: I0218 01:33:34.204869 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-68jq5" event={"ID":"4f1c775a-693d-40ae-b01c-00632b39e8b1","Type":"ContainerStarted","Data":"de670f2cd12f94d25c1349fa1705952083de5dfabeb9f9141aa9954e527dcb10"} Feb 18 01:33:35 crc kubenswrapper[4791]: I0218 01:33:35.225108 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-68jq5" event={"ID":"4f1c775a-693d-40ae-b01c-00632b39e8b1","Type":"ContainerStarted","Data":"f04cf9832320978fb8fbfdb56ff1ee5860f580b6cd98adf958626685138c9c04"} Feb 18 01:33:35 crc kubenswrapper[4791]: I0218 01:33:35.254903 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-68jq5" podStartSLOduration=1.772996362 podStartE2EDuration="2.254877606s" podCreationTimestamp="2026-02-18 01:33:33 +0000 UTC" firstStartedPulling="2026-02-18 01:33:33.878531888 +0000 UTC m=+3555.446545058" lastFinishedPulling="2026-02-18 01:33:34.360413122 +0000 UTC m=+3555.928426302" observedRunningTime="2026-02-18 01:33:35.244575602 +0000 UTC m=+3556.812588782" watchObservedRunningTime="2026-02-18 01:33:35.254877606 +0000 UTC m=+3556.822890786" Feb 18 
01:33:38 crc kubenswrapper[4791]: E0218 01:33:38.065451 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:33:48 crc kubenswrapper[4791]: E0218 01:33:48.063565 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:33:49 crc kubenswrapper[4791]: E0218 01:33:49.090331 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:33:56 crc kubenswrapper[4791]: I0218 01:33:56.800115 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 01:33:56 crc kubenswrapper[4791]: I0218 01:33:56.800755 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 01:34:00 crc kubenswrapper[4791]: E0218 01:34:00.064197 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:34:01 crc kubenswrapper[4791]: I0218 01:34:01.561259 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-blf6h"] Feb 18 01:34:01 crc kubenswrapper[4791]: I0218 01:34:01.564095 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-blf6h" Feb 18 01:34:01 crc kubenswrapper[4791]: I0218 01:34:01.586604 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-blf6h"] Feb 18 01:34:01 crc kubenswrapper[4791]: I0218 01:34:01.622705 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b57f12b8-45f8-4d23-8ca2-39c6123ae4ee-utilities\") pod \"redhat-marketplace-blf6h\" (UID: \"b57f12b8-45f8-4d23-8ca2-39c6123ae4ee\") " pod="openshift-marketplace/redhat-marketplace-blf6h" Feb 18 01:34:01 crc kubenswrapper[4791]: I0218 01:34:01.622922 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b57f12b8-45f8-4d23-8ca2-39c6123ae4ee-catalog-content\") pod \"redhat-marketplace-blf6h\" (UID: \"b57f12b8-45f8-4d23-8ca2-39c6123ae4ee\") " pod="openshift-marketplace/redhat-marketplace-blf6h" Feb 18 01:34:01 crc kubenswrapper[4791]: I0218 01:34:01.623456 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-29842\" (UniqueName: \"kubernetes.io/projected/b57f12b8-45f8-4d23-8ca2-39c6123ae4ee-kube-api-access-29842\") pod \"redhat-marketplace-blf6h\" (UID: \"b57f12b8-45f8-4d23-8ca2-39c6123ae4ee\") " pod="openshift-marketplace/redhat-marketplace-blf6h" Feb 18 01:34:01 crc kubenswrapper[4791]: I0218 01:34:01.725651 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b57f12b8-45f8-4d23-8ca2-39c6123ae4ee-utilities\") pod \"redhat-marketplace-blf6h\" (UID: \"b57f12b8-45f8-4d23-8ca2-39c6123ae4ee\") " pod="openshift-marketplace/redhat-marketplace-blf6h" Feb 18 01:34:01 crc kubenswrapper[4791]: I0218 01:34:01.725747 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b57f12b8-45f8-4d23-8ca2-39c6123ae4ee-catalog-content\") pod \"redhat-marketplace-blf6h\" (UID: \"b57f12b8-45f8-4d23-8ca2-39c6123ae4ee\") " pod="openshift-marketplace/redhat-marketplace-blf6h" Feb 18 01:34:01 crc kubenswrapper[4791]: I0218 01:34:01.725787 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-29842\" (UniqueName: \"kubernetes.io/projected/b57f12b8-45f8-4d23-8ca2-39c6123ae4ee-kube-api-access-29842\") pod \"redhat-marketplace-blf6h\" (UID: \"b57f12b8-45f8-4d23-8ca2-39c6123ae4ee\") " pod="openshift-marketplace/redhat-marketplace-blf6h" Feb 18 01:34:01 crc kubenswrapper[4791]: I0218 01:34:01.726102 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b57f12b8-45f8-4d23-8ca2-39c6123ae4ee-utilities\") pod \"redhat-marketplace-blf6h\" (UID: \"b57f12b8-45f8-4d23-8ca2-39c6123ae4ee\") " pod="openshift-marketplace/redhat-marketplace-blf6h" Feb 18 01:34:01 crc kubenswrapper[4791]: I0218 01:34:01.726443 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b57f12b8-45f8-4d23-8ca2-39c6123ae4ee-catalog-content\") pod \"redhat-marketplace-blf6h\" (UID: \"b57f12b8-45f8-4d23-8ca2-39c6123ae4ee\") " pod="openshift-marketplace/redhat-marketplace-blf6h" Feb 18 01:34:01 crc kubenswrapper[4791]: I0218 01:34:01.744852 4791 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-29842\" (UniqueName: \"kubernetes.io/projected/b57f12b8-45f8-4d23-8ca2-39c6123ae4ee-kube-api-access-29842\") pod \"redhat-marketplace-blf6h\" (UID: \"b57f12b8-45f8-4d23-8ca2-39c6123ae4ee\") " pod="openshift-marketplace/redhat-marketplace-blf6h" Feb 18 01:34:01 crc kubenswrapper[4791]: I0218 01:34:01.892249 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-blf6h" Feb 18 01:34:02 crc kubenswrapper[4791]: E0218 01:34:02.064258 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:34:02 crc kubenswrapper[4791]: I0218 01:34:02.396109 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-blf6h"] Feb 18 01:34:02 crc kubenswrapper[4791]: I0218 01:34:02.525481 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-blf6h" event={"ID":"b57f12b8-45f8-4d23-8ca2-39c6123ae4ee","Type":"ContainerStarted","Data":"6b7bae110c68bc1ecf2ea80eeb2a399703982b5a6d3d4266e10102d45f3a1882"} Feb 18 01:34:03 crc kubenswrapper[4791]: I0218 01:34:03.538479 4791 generic.go:334] "Generic (PLEG): container finished" podID="b57f12b8-45f8-4d23-8ca2-39c6123ae4ee" containerID="0842375efd0a97dfad118cbd9b8c6898a9a1eb040d2efc09e6598eaa735dba33" exitCode=0 Feb 18 01:34:03 crc kubenswrapper[4791]: I0218 01:34:03.538590 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-blf6h" event={"ID":"b57f12b8-45f8-4d23-8ca2-39c6123ae4ee","Type":"ContainerDied","Data":"0842375efd0a97dfad118cbd9b8c6898a9a1eb040d2efc09e6598eaa735dba33"} Feb 18 01:34:04 crc kubenswrapper[4791]: I0218 01:34:04.549628 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-blf6h" event={"ID":"b57f12b8-45f8-4d23-8ca2-39c6123ae4ee","Type":"ContainerStarted","Data":"5de73960c29ee89358f3d8a74accb769431f4f55d9af7aa77471430467c7aefd"} Feb 18 01:34:05 crc kubenswrapper[4791]: I0218 01:34:05.564952 4791 generic.go:334] "Generic (PLEG): container finished" podID="b57f12b8-45f8-4d23-8ca2-39c6123ae4ee" containerID="5de73960c29ee89358f3d8a74accb769431f4f55d9af7aa77471430467c7aefd" exitCode=0 Feb 18 01:34:05 crc kubenswrapper[4791]: I0218 01:34:05.565065 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-blf6h" event={"ID":"b57f12b8-45f8-4d23-8ca2-39c6123ae4ee","Type":"ContainerDied","Data":"5de73960c29ee89358f3d8a74accb769431f4f55d9af7aa77471430467c7aefd"} Feb 18 01:34:06 crc kubenswrapper[4791]: I0218 01:34:06.577740 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-blf6h" event={"ID":"b57f12b8-45f8-4d23-8ca2-39c6123ae4ee","Type":"ContainerStarted","Data":"c9e0916fd5d1dd9d5dfd006f91b0bc71f3d95bf39095d66371f5fb9559c9c40c"} Feb 18 01:34:06 crc kubenswrapper[4791]: I0218 01:34:06.605497 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-blf6h" podStartSLOduration=3.135876269 podStartE2EDuration="5.605472947s" podCreationTimestamp="2026-02-18 01:34:01 +0000 UTC" firstStartedPulling="2026-02-18 01:34:03.540128471 +0000 UTC 
m=+3585.108141631" lastFinishedPulling="2026-02-18 01:34:06.009725139 +0000 UTC m=+3587.577738309" observedRunningTime="2026-02-18 01:34:06.60292762 +0000 UTC m=+3588.170940800" watchObservedRunningTime="2026-02-18 01:34:06.605472947 +0000 UTC m=+3588.173486157" Feb 18 01:34:11 crc kubenswrapper[4791]: I0218 01:34:11.893088 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-blf6h" Feb 18 01:34:11 crc kubenswrapper[4791]: I0218 01:34:11.893685 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-blf6h" Feb 18 01:34:11 crc kubenswrapper[4791]: I0218 01:34:11.953933 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-blf6h" Feb 18 01:34:12 crc kubenswrapper[4791]: E0218 01:34:12.063440 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:34:12 crc kubenswrapper[4791]: I0218 01:34:12.690648 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-blf6h" Feb 18 01:34:12 crc kubenswrapper[4791]: I0218 01:34:12.751202 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-blf6h"] Feb 18 01:34:14 crc kubenswrapper[4791]: I0218 01:34:14.671513 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-blf6h" podUID="b57f12b8-45f8-4d23-8ca2-39c6123ae4ee" containerName="registry-server" containerID="cri-o://c9e0916fd5d1dd9d5dfd006f91b0bc71f3d95bf39095d66371f5fb9559c9c40c" gracePeriod=2 Feb 18 01:34:15 crc kubenswrapper[4791]: I0218 01:34:15.683259 4791 generic.go:334] "Generic (PLEG): container finished" podID="b57f12b8-45f8-4d23-8ca2-39c6123ae4ee" containerID="c9e0916fd5d1dd9d5dfd006f91b0bc71f3d95bf39095d66371f5fb9559c9c40c" exitCode=0 Feb 18 01:34:15 crc kubenswrapper[4791]: I0218 01:34:15.683371 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-blf6h" event={"ID":"b57f12b8-45f8-4d23-8ca2-39c6123ae4ee","Type":"ContainerDied","Data":"c9e0916fd5d1dd9d5dfd006f91b0bc71f3d95bf39095d66371f5fb9559c9c40c"} Feb 18 01:34:15 crc kubenswrapper[4791]: I0218 01:34:15.683886 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-blf6h" event={"ID":"b57f12b8-45f8-4d23-8ca2-39c6123ae4ee","Type":"ContainerDied","Data":"6b7bae110c68bc1ecf2ea80eeb2a399703982b5a6d3d4266e10102d45f3a1882"} Feb 18 01:34:15 crc kubenswrapper[4791]: I0218 01:34:15.683908 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6b7bae110c68bc1ecf2ea80eeb2a399703982b5a6d3d4266e10102d45f3a1882" Feb 18 01:34:15 crc kubenswrapper[4791]: I0218 01:34:15.694604 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-blf6h" Feb 18 01:34:15 crc kubenswrapper[4791]: I0218 01:34:15.816275 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b57f12b8-45f8-4d23-8ca2-39c6123ae4ee-utilities\") pod \"b57f12b8-45f8-4d23-8ca2-39c6123ae4ee\" (UID: \"b57f12b8-45f8-4d23-8ca2-39c6123ae4ee\") " Feb 18 01:34:15 crc kubenswrapper[4791]: I0218 01:34:15.816382 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b57f12b8-45f8-4d23-8ca2-39c6123ae4ee-catalog-content\") pod \"b57f12b8-45f8-4d23-8ca2-39c6123ae4ee\" (UID: \"b57f12b8-45f8-4d23-8ca2-39c6123ae4ee\") " Feb 18 01:34:15 crc kubenswrapper[4791]: I0218 01:34:15.816491 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-29842\" (UniqueName: \"kubernetes.io/projected/b57f12b8-45f8-4d23-8ca2-39c6123ae4ee-kube-api-access-29842\") pod \"b57f12b8-45f8-4d23-8ca2-39c6123ae4ee\" (UID: \"b57f12b8-45f8-4d23-8ca2-39c6123ae4ee\") " Feb 18 01:34:15 crc kubenswrapper[4791]: I0218 01:34:15.817436 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b57f12b8-45f8-4d23-8ca2-39c6123ae4ee-utilities" (OuterVolumeSpecName: "utilities") pod "b57f12b8-45f8-4d23-8ca2-39c6123ae4ee" (UID: "b57f12b8-45f8-4d23-8ca2-39c6123ae4ee"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:34:15 crc kubenswrapper[4791]: I0218 01:34:15.821975 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b57f12b8-45f8-4d23-8ca2-39c6123ae4ee-kube-api-access-29842" (OuterVolumeSpecName: "kube-api-access-29842") pod "b57f12b8-45f8-4d23-8ca2-39c6123ae4ee" (UID: "b57f12b8-45f8-4d23-8ca2-39c6123ae4ee"). InnerVolumeSpecName "kube-api-access-29842". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:34:15 crc kubenswrapper[4791]: I0218 01:34:15.848768 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b57f12b8-45f8-4d23-8ca2-39c6123ae4ee-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b57f12b8-45f8-4d23-8ca2-39c6123ae4ee" (UID: "b57f12b8-45f8-4d23-8ca2-39c6123ae4ee"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:34:15 crc kubenswrapper[4791]: I0218 01:34:15.921853 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b57f12b8-45f8-4d23-8ca2-39c6123ae4ee-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 01:34:15 crc kubenswrapper[4791]: I0218 01:34:15.921899 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b57f12b8-45f8-4d23-8ca2-39c6123ae4ee-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 01:34:15 crc kubenswrapper[4791]: I0218 01:34:15.921919 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-29842\" (UniqueName: \"kubernetes.io/projected/b57f12b8-45f8-4d23-8ca2-39c6123ae4ee-kube-api-access-29842\") on node \"crc\" DevicePath \"\"" Feb 18 01:34:16 crc kubenswrapper[4791]: I0218 01:34:16.691257 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-blf6h" Feb 18 01:34:16 crc kubenswrapper[4791]: I0218 01:34:16.722601 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-blf6h"] Feb 18 01:34:16 crc kubenswrapper[4791]: I0218 01:34:16.733417 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-blf6h"] Feb 18 01:34:17 crc kubenswrapper[4791]: E0218 01:34:17.064022 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:34:17 crc kubenswrapper[4791]: I0218 01:34:17.077371 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b57f12b8-45f8-4d23-8ca2-39c6123ae4ee" path="/var/lib/kubelet/pods/b57f12b8-45f8-4d23-8ca2-39c6123ae4ee/volumes" Feb 18 01:34:26 crc kubenswrapper[4791]: I0218 01:34:26.799607 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 01:34:26 crc kubenswrapper[4791]: I0218 01:34:26.800954 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 01:34:27 crc kubenswrapper[4791]: E0218 01:34:27.063617 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:34:31 crc kubenswrapper[4791]: E0218 01:34:31.256698 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:34:38 crc kubenswrapper[4791]: E0218 01:34:38.063370 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:34:42 crc kubenswrapper[4791]: E0218 01:34:42.064822 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:34:51 crc kubenswrapper[4791]: E0218 01:34:51.064400 4791 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:34:55 crc kubenswrapper[4791]: E0218 01:34:55.064284 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:34:56 crc kubenswrapper[4791]: I0218 01:34:56.800469 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 01:34:56 crc kubenswrapper[4791]: I0218 01:34:56.800925 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 01:34:56 crc kubenswrapper[4791]: I0218 01:34:56.801000 4791 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" Feb 18 01:34:56 crc kubenswrapper[4791]: I0218 01:34:56.802791 4791 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"98dff6a03f3550c58e5b26455aa79fc869a37bdfd0b430a04ca162d0fc043c5e"} pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 18 01:34:56 crc kubenswrapper[4791]: I0218 01:34:56.802946 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" containerID="cri-o://98dff6a03f3550c58e5b26455aa79fc869a37bdfd0b430a04ca162d0fc043c5e" gracePeriod=600 Feb 18 01:34:57 crc kubenswrapper[4791]: I0218 01:34:57.196230 4791 generic.go:334] "Generic (PLEG): container finished" podID="0b31a333-8f95-459c-8135-e91e557c4c85" containerID="98dff6a03f3550c58e5b26455aa79fc869a37bdfd0b430a04ca162d0fc043c5e" exitCode=0 Feb 18 01:34:57 crc kubenswrapper[4791]: I0218 01:34:57.196336 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerDied","Data":"98dff6a03f3550c58e5b26455aa79fc869a37bdfd0b430a04ca162d0fc043c5e"} Feb 18 01:34:57 crc kubenswrapper[4791]: I0218 01:34:57.196619 4791 scope.go:117] "RemoveContainer" containerID="3cd1bc8e681afde002094b7f387db47144e26b7fef30167b74d341dbe40fb0a5" Feb 18 01:34:58 crc kubenswrapper[4791]: I0218 01:34:58.208456 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" 
event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerStarted","Data":"0c66914ba8ca3f5ddbabe2813fa17e382663af710ab17028ec8f40608d73bea9"} Feb 18 01:35:03 crc kubenswrapper[4791]: E0218 01:35:03.064371 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:35:09 crc kubenswrapper[4791]: E0218 01:35:09.064223 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:35:14 crc kubenswrapper[4791]: E0218 01:35:14.065842 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:35:21 crc kubenswrapper[4791]: E0218 01:35:21.063831 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:35:25 crc kubenswrapper[4791]: E0218 01:35:25.064109 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:35:36 crc kubenswrapper[4791]: E0218 01:35:36.065800 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:35:38 crc kubenswrapper[4791]: E0218 01:35:38.063752 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:35:48 crc kubenswrapper[4791]: E0218 01:35:48.064882 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:35:51 crc kubenswrapper[4791]: E0218 01:35:51.063865 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with 
ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:36:01 crc kubenswrapper[4791]: E0218 01:36:01.065019 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:36:02 crc kubenswrapper[4791]: E0218 01:36:02.063584 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:36:16 crc kubenswrapper[4791]: E0218 01:36:16.064377 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:36:17 crc kubenswrapper[4791]: E0218 01:36:17.062211 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:36:28 crc kubenswrapper[4791]: E0218 01:36:28.065805 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:36:32 crc kubenswrapper[4791]: E0218 01:36:32.064837 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:36:40 crc kubenswrapper[4791]: E0218 01:36:40.065484 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:36:47 crc kubenswrapper[4791]: E0218 01:36:47.064204 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:36:52 crc kubenswrapper[4791]: E0218 01:36:52.064964 4791 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:36:58 crc kubenswrapper[4791]: E0218 01:36:58.063921 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:37:07 crc kubenswrapper[4791]: E0218 01:37:07.063859 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:37:13 crc kubenswrapper[4791]: I0218 01:37:13.064467 4791 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 18 01:37:13 crc kubenswrapper[4791]: E0218 01:37:13.186919 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 01:37:13 crc kubenswrapper[4791]: E0218 01:37:13.186980 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 01:37:13 crc kubenswrapper[4791]: E0218 01:37:13.187102 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lgn7d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-scphk_openstack(68e5e8d6-5771-4045-858a-4a39b2db99f9): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 18 01:37:13 crc kubenswrapper[4791]: E0218 01:37:13.189136 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:37:22 crc kubenswrapper[4791]: E0218 01:37:22.063195 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:37:26 crc kubenswrapper[4791]: E0218 01:37:26.064087 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:37:26 crc kubenswrapper[4791]: I0218 01:37:26.800224 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 01:37:26 crc kubenswrapper[4791]: I0218 01:37:26.800533 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 01:37:33 crc kubenswrapper[4791]: E0218 01:37:33.066084 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:37:38 crc kubenswrapper[4791]: E0218 01:37:38.064617 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:37:48 crc kubenswrapper[4791]: E0218 01:37:48.064028 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:37:50 crc kubenswrapper[4791]: E0218 01:37:50.064267 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:37:56 crc kubenswrapper[4791]: I0218 01:37:56.800419 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 01:37:56 crc kubenswrapper[4791]: I0218 01:37:56.801149 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 01:38:01 crc kubenswrapper[4791]: I0218 01:38:01.647433 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-42mpw"] Feb 18 01:38:01 crc kubenswrapper[4791]: E0218 01:38:01.648590 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b57f12b8-45f8-4d23-8ca2-39c6123ae4ee" containerName="registry-server" Feb 18 01:38:01 crc kubenswrapper[4791]: I0218 01:38:01.648608 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="b57f12b8-45f8-4d23-8ca2-39c6123ae4ee" containerName="registry-server" Feb 18 01:38:01 crc kubenswrapper[4791]: E0218 01:38:01.648640 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b57f12b8-45f8-4d23-8ca2-39c6123ae4ee" containerName="extract-utilities" Feb 18 01:38:01 crc kubenswrapper[4791]: I0218 01:38:01.648649 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="b57f12b8-45f8-4d23-8ca2-39c6123ae4ee" containerName="extract-utilities" Feb 18 01:38:01 crc kubenswrapper[4791]: E0218 01:38:01.648666 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b57f12b8-45f8-4d23-8ca2-39c6123ae4ee" containerName="extract-content" Feb 18 01:38:01 crc kubenswrapper[4791]: I0218 01:38:01.648674 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="b57f12b8-45f8-4d23-8ca2-39c6123ae4ee" containerName="extract-content" Feb 18 01:38:01 crc kubenswrapper[4791]: I0218 01:38:01.648954 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="b57f12b8-45f8-4d23-8ca2-39c6123ae4ee" containerName="registry-server" Feb 18 01:38:01 crc kubenswrapper[4791]: I0218 01:38:01.650854 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-42mpw" Feb 18 01:38:01 crc kubenswrapper[4791]: I0218 01:38:01.664353 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-42mpw"] Feb 18 01:38:01 crc kubenswrapper[4791]: I0218 01:38:01.721948 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6225c118-ac64-4c01-911a-aada8c697366-utilities\") pod \"community-operators-42mpw\" (UID: \"6225c118-ac64-4c01-911a-aada8c697366\") " pod="openshift-marketplace/community-operators-42mpw" Feb 18 01:38:01 crc kubenswrapper[4791]: I0218 01:38:01.722036 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mpw6k\" (UniqueName: \"kubernetes.io/projected/6225c118-ac64-4c01-911a-aada8c697366-kube-api-access-mpw6k\") pod \"community-operators-42mpw\" (UID: \"6225c118-ac64-4c01-911a-aada8c697366\") " pod="openshift-marketplace/community-operators-42mpw" Feb 18 01:38:01 crc kubenswrapper[4791]: I0218 01:38:01.722174 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6225c118-ac64-4c01-911a-aada8c697366-catalog-content\") pod \"community-operators-42mpw\" (UID: \"6225c118-ac64-4c01-911a-aada8c697366\") " pod="openshift-marketplace/community-operators-42mpw" Feb 18 01:38:01 crc kubenswrapper[4791]: I0218 01:38:01.824852 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6225c118-ac64-4c01-911a-aada8c697366-catalog-content\") pod \"community-operators-42mpw\" (UID: \"6225c118-ac64-4c01-911a-aada8c697366\") " pod="openshift-marketplace/community-operators-42mpw" Feb 18 01:38:01 crc kubenswrapper[4791]: I0218 01:38:01.825204 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6225c118-ac64-4c01-911a-aada8c697366-utilities\") pod \"community-operators-42mpw\" (UID: \"6225c118-ac64-4c01-911a-aada8c697366\") " pod="openshift-marketplace/community-operators-42mpw" Feb 18 01:38:01 crc kubenswrapper[4791]: I0218 01:38:01.825240 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mpw6k\" (UniqueName: \"kubernetes.io/projected/6225c118-ac64-4c01-911a-aada8c697366-kube-api-access-mpw6k\") pod \"community-operators-42mpw\" (UID: \"6225c118-ac64-4c01-911a-aada8c697366\") " pod="openshift-marketplace/community-operators-42mpw" Feb 18 01:38:01 crc kubenswrapper[4791]: I0218 01:38:01.826062 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6225c118-ac64-4c01-911a-aada8c697366-catalog-content\") pod \"community-operators-42mpw\" (UID: \"6225c118-ac64-4c01-911a-aada8c697366\") " pod="openshift-marketplace/community-operators-42mpw" Feb 18 01:38:01 crc kubenswrapper[4791]: I0218 01:38:01.826360 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6225c118-ac64-4c01-911a-aada8c697366-utilities\") pod \"community-operators-42mpw\" (UID: \"6225c118-ac64-4c01-911a-aada8c697366\") " pod="openshift-marketplace/community-operators-42mpw" Feb 18 01:38:02 crc kubenswrapper[4791]: E0218 01:38:02.063586 4791 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:38:02 crc kubenswrapper[4791]: I0218 01:38:02.493714 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mpw6k\" (UniqueName: \"kubernetes.io/projected/6225c118-ac64-4c01-911a-aada8c697366-kube-api-access-mpw6k\") pod \"community-operators-42mpw\" (UID: \"6225c118-ac64-4c01-911a-aada8c697366\") " pod="openshift-marketplace/community-operators-42mpw" Feb 18 01:38:02 crc kubenswrapper[4791]: I0218 01:38:02.575118 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-42mpw" Feb 18 01:38:03 crc kubenswrapper[4791]: E0218 01:38:03.185956 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 01:38:03 crc kubenswrapper[4791]: E0218 01:38:03.186294 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 01:38:03 crc kubenswrapper[4791]: E0218 01:38:03.186420 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndh699h566hcch79h59h595h58bhb6h7fh594h67fh546h668h56hc8h6chd5h94h599h587hddh64bh57bhc9h68fh7dh57bh65dh64fh68dh59dq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-np7gr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(4b9cec47-aeda-40f0-b83e-46f09ce65e95): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 18 01:38:03 crc kubenswrapper[4791]: E0218 01:38:03.187630 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:38:03 crc kubenswrapper[4791]: I0218 01:38:03.275822 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-42mpw"] Feb 18 01:38:03 crc kubenswrapper[4791]: I0218 01:38:03.318239 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-42mpw" event={"ID":"6225c118-ac64-4c01-911a-aada8c697366","Type":"ContainerStarted","Data":"8ea9c918be5791c797578bf2f86f439c8516e8d393650f90981add86cd9a362b"} Feb 18 01:38:04 crc kubenswrapper[4791]: I0218 01:38:04.330784 4791 generic.go:334] "Generic (PLEG): container finished" podID="6225c118-ac64-4c01-911a-aada8c697366" containerID="4e167c005130d6f042ba3e239eb428eb2a9e48722703a6d6dfb7a8002266bee9" exitCode=0 Feb 18 01:38:04 crc kubenswrapper[4791]: I0218 01:38:04.330866 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-42mpw" event={"ID":"6225c118-ac64-4c01-911a-aada8c697366","Type":"ContainerDied","Data":"4e167c005130d6f042ba3e239eb428eb2a9e48722703a6d6dfb7a8002266bee9"} Feb 18 01:38:06 crc kubenswrapper[4791]: I0218 01:38:06.355203 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-42mpw" event={"ID":"6225c118-ac64-4c01-911a-aada8c697366","Type":"ContainerStarted","Data":"4152af5dfed9ac1710ceae1f2bc18f097640d6b13b58161fc4261dedab0996dd"} Feb 18 01:38:07 crc kubenswrapper[4791]: I0218 01:38:07.369779 4791 generic.go:334] "Generic (PLEG): container finished" podID="6225c118-ac64-4c01-911a-aada8c697366" containerID="4152af5dfed9ac1710ceae1f2bc18f097640d6b13b58161fc4261dedab0996dd" exitCode=0 Feb 18 01:38:07 crc kubenswrapper[4791]: I0218 01:38:07.369822 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-42mpw" event={"ID":"6225c118-ac64-4c01-911a-aada8c697366","Type":"ContainerDied","Data":"4152af5dfed9ac1710ceae1f2bc18f097640d6b13b58161fc4261dedab0996dd"} Feb 18 01:38:08 crc kubenswrapper[4791]: I0218 01:38:08.381105 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-42mpw" event={"ID":"6225c118-ac64-4c01-911a-aada8c697366","Type":"ContainerStarted","Data":"29e8660eb60d7c03f0edcbe54c42b7d0f91b680c0508a401c08c1948de7c6c67"} Feb 18 01:38:08 crc kubenswrapper[4791]: I0218 01:38:08.409195 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-42mpw" podStartSLOduration=3.962748912 podStartE2EDuration="7.409177397s" podCreationTimestamp="2026-02-18 01:38:01 +0000 UTC" firstStartedPulling="2026-02-18 01:38:04.333101329 +0000 UTC m=+3825.901114509" lastFinishedPulling="2026-02-18 01:38:07.779529824 +0000 UTC m=+3829.347542994" observedRunningTime="2026-02-18 01:38:08.402454332 +0000 UTC m=+3829.970467542" watchObservedRunningTime="2026-02-18 01:38:08.409177397 +0000 UTC m=+3829.977190567" Feb 18 01:38:12 crc kubenswrapper[4791]: I0218 01:38:12.577141 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-42mpw" Feb 18 01:38:12 crc kubenswrapper[4791]: I0218 01:38:12.578747 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-42mpw" Feb 18 01:38:12 crc kubenswrapper[4791]: I0218 01:38:12.625771 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="started" pod="openshift-marketplace/community-operators-42mpw" Feb 18 01:38:13 crc kubenswrapper[4791]: E0218 01:38:13.063381 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:38:13 crc kubenswrapper[4791]: I0218 01:38:13.474216 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-42mpw" Feb 18 01:38:13 crc kubenswrapper[4791]: I0218 01:38:13.517266 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-42mpw"] Feb 18 01:38:15 crc kubenswrapper[4791]: E0218 01:38:15.063705 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:38:15 crc kubenswrapper[4791]: I0218 01:38:15.450643 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-42mpw" podUID="6225c118-ac64-4c01-911a-aada8c697366" containerName="registry-server" containerID="cri-o://29e8660eb60d7c03f0edcbe54c42b7d0f91b680c0508a401c08c1948de7c6c67" gracePeriod=2 Feb 18 01:38:15 crc kubenswrapper[4791]: I0218 01:38:15.975849 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-42mpw" Feb 18 01:38:16 crc kubenswrapper[4791]: I0218 01:38:16.111993 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mpw6k\" (UniqueName: \"kubernetes.io/projected/6225c118-ac64-4c01-911a-aada8c697366-kube-api-access-mpw6k\") pod \"6225c118-ac64-4c01-911a-aada8c697366\" (UID: \"6225c118-ac64-4c01-911a-aada8c697366\") " Feb 18 01:38:16 crc kubenswrapper[4791]: I0218 01:38:16.112117 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6225c118-ac64-4c01-911a-aada8c697366-catalog-content\") pod \"6225c118-ac64-4c01-911a-aada8c697366\" (UID: \"6225c118-ac64-4c01-911a-aada8c697366\") " Feb 18 01:38:16 crc kubenswrapper[4791]: I0218 01:38:16.112451 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6225c118-ac64-4c01-911a-aada8c697366-utilities\") pod \"6225c118-ac64-4c01-911a-aada8c697366\" (UID: \"6225c118-ac64-4c01-911a-aada8c697366\") " Feb 18 01:38:16 crc kubenswrapper[4791]: I0218 01:38:16.113257 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6225c118-ac64-4c01-911a-aada8c697366-utilities" (OuterVolumeSpecName: "utilities") pod "6225c118-ac64-4c01-911a-aada8c697366" (UID: "6225c118-ac64-4c01-911a-aada8c697366"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:38:16 crc kubenswrapper[4791]: I0218 01:38:16.123927 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6225c118-ac64-4c01-911a-aada8c697366-kube-api-access-mpw6k" (OuterVolumeSpecName: "kube-api-access-mpw6k") pod "6225c118-ac64-4c01-911a-aada8c697366" (UID: "6225c118-ac64-4c01-911a-aada8c697366"). InnerVolumeSpecName "kube-api-access-mpw6k". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:38:16 crc kubenswrapper[4791]: I0218 01:38:16.159747 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6225c118-ac64-4c01-911a-aada8c697366-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6225c118-ac64-4c01-911a-aada8c697366" (UID: "6225c118-ac64-4c01-911a-aada8c697366"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:38:16 crc kubenswrapper[4791]: I0218 01:38:16.216349 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6225c118-ac64-4c01-911a-aada8c697366-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 01:38:16 crc kubenswrapper[4791]: I0218 01:38:16.216385 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mpw6k\" (UniqueName: \"kubernetes.io/projected/6225c118-ac64-4c01-911a-aada8c697366-kube-api-access-mpw6k\") on node \"crc\" DevicePath \"\"" Feb 18 01:38:16 crc kubenswrapper[4791]: I0218 01:38:16.216399 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6225c118-ac64-4c01-911a-aada8c697366-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 01:38:16 crc kubenswrapper[4791]: I0218 01:38:16.461823 4791 generic.go:334] "Generic (PLEG): container finished" podID="6225c118-ac64-4c01-911a-aada8c697366" containerID="29e8660eb60d7c03f0edcbe54c42b7d0f91b680c0508a401c08c1948de7c6c67" exitCode=0 Feb 18 01:38:16 crc kubenswrapper[4791]: I0218 01:38:16.461876 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-42mpw" event={"ID":"6225c118-ac64-4c01-911a-aada8c697366","Type":"ContainerDied","Data":"29e8660eb60d7c03f0edcbe54c42b7d0f91b680c0508a401c08c1948de7c6c67"} Feb 18 01:38:16 crc kubenswrapper[4791]: I0218 01:38:16.461885 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-42mpw" Feb 18 01:38:16 crc kubenswrapper[4791]: I0218 01:38:16.461910 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-42mpw" event={"ID":"6225c118-ac64-4c01-911a-aada8c697366","Type":"ContainerDied","Data":"8ea9c918be5791c797578bf2f86f439c8516e8d393650f90981add86cd9a362b"} Feb 18 01:38:16 crc kubenswrapper[4791]: I0218 01:38:16.461935 4791 scope.go:117] "RemoveContainer" containerID="29e8660eb60d7c03f0edcbe54c42b7d0f91b680c0508a401c08c1948de7c6c67" Feb 18 01:38:16 crc kubenswrapper[4791]: I0218 01:38:16.488285 4791 scope.go:117] "RemoveContainer" containerID="4152af5dfed9ac1710ceae1f2bc18f097640d6b13b58161fc4261dedab0996dd" Feb 18 01:38:16 crc kubenswrapper[4791]: I0218 01:38:16.497648 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-42mpw"] Feb 18 01:38:16 crc kubenswrapper[4791]: I0218 01:38:16.508010 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-42mpw"] Feb 18 01:38:16 crc kubenswrapper[4791]: I0218 01:38:16.517868 4791 scope.go:117] "RemoveContainer" containerID="4e167c005130d6f042ba3e239eb428eb2a9e48722703a6d6dfb7a8002266bee9" Feb 18 01:38:16 crc kubenswrapper[4791]: I0218 01:38:16.568719 4791 scope.go:117] "RemoveContainer" containerID="29e8660eb60d7c03f0edcbe54c42b7d0f91b680c0508a401c08c1948de7c6c67" Feb 18 01:38:16 crc kubenswrapper[4791]: E0218 01:38:16.569149 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"29e8660eb60d7c03f0edcbe54c42b7d0f91b680c0508a401c08c1948de7c6c67\": container with ID starting with 29e8660eb60d7c03f0edcbe54c42b7d0f91b680c0508a401c08c1948de7c6c67 not found: ID does not exist" containerID="29e8660eb60d7c03f0edcbe54c42b7d0f91b680c0508a401c08c1948de7c6c67" Feb 18 01:38:16 crc kubenswrapper[4791]: I0218 01:38:16.569361 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"29e8660eb60d7c03f0edcbe54c42b7d0f91b680c0508a401c08c1948de7c6c67"} err="failed to get container status \"29e8660eb60d7c03f0edcbe54c42b7d0f91b680c0508a401c08c1948de7c6c67\": rpc error: code = NotFound desc = could not find container \"29e8660eb60d7c03f0edcbe54c42b7d0f91b680c0508a401c08c1948de7c6c67\": container with ID starting with 29e8660eb60d7c03f0edcbe54c42b7d0f91b680c0508a401c08c1948de7c6c67 not found: ID does not exist" Feb 18 01:38:16 crc kubenswrapper[4791]: I0218 01:38:16.569387 4791 scope.go:117] "RemoveContainer" containerID="4152af5dfed9ac1710ceae1f2bc18f097640d6b13b58161fc4261dedab0996dd" Feb 18 01:38:16 crc kubenswrapper[4791]: E0218 01:38:16.569709 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4152af5dfed9ac1710ceae1f2bc18f097640d6b13b58161fc4261dedab0996dd\": container with ID starting with 4152af5dfed9ac1710ceae1f2bc18f097640d6b13b58161fc4261dedab0996dd not found: ID does not exist" containerID="4152af5dfed9ac1710ceae1f2bc18f097640d6b13b58161fc4261dedab0996dd" Feb 18 01:38:16 crc kubenswrapper[4791]: I0218 01:38:16.569764 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4152af5dfed9ac1710ceae1f2bc18f097640d6b13b58161fc4261dedab0996dd"} err="failed to get container status \"4152af5dfed9ac1710ceae1f2bc18f097640d6b13b58161fc4261dedab0996dd\": rpc error: code = NotFound desc = could not find 
container \"4152af5dfed9ac1710ceae1f2bc18f097640d6b13b58161fc4261dedab0996dd\": container with ID starting with 4152af5dfed9ac1710ceae1f2bc18f097640d6b13b58161fc4261dedab0996dd not found: ID does not exist" Feb 18 01:38:16 crc kubenswrapper[4791]: I0218 01:38:16.569796 4791 scope.go:117] "RemoveContainer" containerID="4e167c005130d6f042ba3e239eb428eb2a9e48722703a6d6dfb7a8002266bee9" Feb 18 01:38:16 crc kubenswrapper[4791]: E0218 01:38:16.570141 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4e167c005130d6f042ba3e239eb428eb2a9e48722703a6d6dfb7a8002266bee9\": container with ID starting with 4e167c005130d6f042ba3e239eb428eb2a9e48722703a6d6dfb7a8002266bee9 not found: ID does not exist" containerID="4e167c005130d6f042ba3e239eb428eb2a9e48722703a6d6dfb7a8002266bee9" Feb 18 01:38:16 crc kubenswrapper[4791]: I0218 01:38:16.570184 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e167c005130d6f042ba3e239eb428eb2a9e48722703a6d6dfb7a8002266bee9"} err="failed to get container status \"4e167c005130d6f042ba3e239eb428eb2a9e48722703a6d6dfb7a8002266bee9\": rpc error: code = NotFound desc = could not find container \"4e167c005130d6f042ba3e239eb428eb2a9e48722703a6d6dfb7a8002266bee9\": container with ID starting with 4e167c005130d6f042ba3e239eb428eb2a9e48722703a6d6dfb7a8002266bee9 not found: ID does not exist" Feb 18 01:38:17 crc kubenswrapper[4791]: I0218 01:38:17.074664 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6225c118-ac64-4c01-911a-aada8c697366" path="/var/lib/kubelet/pods/6225c118-ac64-4c01-911a-aada8c697366/volumes" Feb 18 01:38:26 crc kubenswrapper[4791]: I0218 01:38:26.800431 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 01:38:26 crc kubenswrapper[4791]: I0218 01:38:26.801041 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 01:38:26 crc kubenswrapper[4791]: I0218 01:38:26.801093 4791 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" Feb 18 01:38:26 crc kubenswrapper[4791]: I0218 01:38:26.801978 4791 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0c66914ba8ca3f5ddbabe2813fa17e382663af710ab17028ec8f40608d73bea9"} pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 18 01:38:26 crc kubenswrapper[4791]: I0218 01:38:26.802037 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" containerID="cri-o://0c66914ba8ca3f5ddbabe2813fa17e382663af710ab17028ec8f40608d73bea9" gracePeriod=600 Feb 18 01:38:26 crc kubenswrapper[4791]: E0218 01:38:26.937195 4791 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:38:27 crc kubenswrapper[4791]: E0218 01:38:27.062700 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:38:27 crc kubenswrapper[4791]: I0218 01:38:27.575488 4791 generic.go:334] "Generic (PLEG): container finished" podID="0b31a333-8f95-459c-8135-e91e557c4c85" containerID="0c66914ba8ca3f5ddbabe2813fa17e382663af710ab17028ec8f40608d73bea9" exitCode=0 Feb 18 01:38:27 crc kubenswrapper[4791]: I0218 01:38:27.575535 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerDied","Data":"0c66914ba8ca3f5ddbabe2813fa17e382663af710ab17028ec8f40608d73bea9"} Feb 18 01:38:27 crc kubenswrapper[4791]: I0218 01:38:27.575567 4791 scope.go:117] "RemoveContainer" containerID="98dff6a03f3550c58e5b26455aa79fc869a37bdfd0b430a04ca162d0fc043c5e" Feb 18 01:38:27 crc kubenswrapper[4791]: I0218 01:38:27.576518 4791 scope.go:117] "RemoveContainer" containerID="0c66914ba8ca3f5ddbabe2813fa17e382663af710ab17028ec8f40608d73bea9" Feb 18 01:38:27 crc kubenswrapper[4791]: E0218 01:38:27.576805 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:38:30 crc kubenswrapper[4791]: E0218 01:38:30.063603 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:38:40 crc kubenswrapper[4791]: E0218 01:38:40.064479 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:38:41 crc kubenswrapper[4791]: E0218 01:38:41.064021 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:38:42 crc kubenswrapper[4791]: I0218 01:38:42.061550 4791 
scope.go:117] "RemoveContainer" containerID="0c66914ba8ca3f5ddbabe2813fa17e382663af710ab17028ec8f40608d73bea9" Feb 18 01:38:42 crc kubenswrapper[4791]: E0218 01:38:42.062139 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:38:51 crc kubenswrapper[4791]: E0218 01:38:51.067127 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:38:52 crc kubenswrapper[4791]: E0218 01:38:52.064019 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:38:57 crc kubenswrapper[4791]: I0218 01:38:57.061310 4791 scope.go:117] "RemoveContainer" containerID="0c66914ba8ca3f5ddbabe2813fa17e382663af710ab17028ec8f40608d73bea9" Feb 18 01:38:57 crc kubenswrapper[4791]: E0218 01:38:57.062189 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:38:59 crc kubenswrapper[4791]: I0218 01:38:59.274862 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-l29mr"] Feb 18 01:38:59 crc kubenswrapper[4791]: E0218 01:38:59.275840 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6225c118-ac64-4c01-911a-aada8c697366" containerName="extract-content" Feb 18 01:38:59 crc kubenswrapper[4791]: I0218 01:38:59.275856 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="6225c118-ac64-4c01-911a-aada8c697366" containerName="extract-content" Feb 18 01:38:59 crc kubenswrapper[4791]: E0218 01:38:59.275904 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6225c118-ac64-4c01-911a-aada8c697366" containerName="extract-utilities" Feb 18 01:38:59 crc kubenswrapper[4791]: I0218 01:38:59.275911 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="6225c118-ac64-4c01-911a-aada8c697366" containerName="extract-utilities" Feb 18 01:38:59 crc kubenswrapper[4791]: E0218 01:38:59.275924 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6225c118-ac64-4c01-911a-aada8c697366" containerName="registry-server" Feb 18 01:38:59 crc kubenswrapper[4791]: I0218 01:38:59.275930 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="6225c118-ac64-4c01-911a-aada8c697366" containerName="registry-server" Feb 18 01:38:59 crc kubenswrapper[4791]: I0218 01:38:59.276134 4791 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="6225c118-ac64-4c01-911a-aada8c697366" containerName="registry-server" Feb 18 01:38:59 crc kubenswrapper[4791]: I0218 01:38:59.277826 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l29mr" Feb 18 01:38:59 crc kubenswrapper[4791]: I0218 01:38:59.293469 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-l29mr"] Feb 18 01:38:59 crc kubenswrapper[4791]: I0218 01:38:59.360836 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e9a7711c-a7c2-40f4-953f-d83eee1ed3cb-catalog-content\") pod \"certified-operators-l29mr\" (UID: \"e9a7711c-a7c2-40f4-953f-d83eee1ed3cb\") " pod="openshift-marketplace/certified-operators-l29mr" Feb 18 01:38:59 crc kubenswrapper[4791]: I0218 01:38:59.360912 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e9a7711c-a7c2-40f4-953f-d83eee1ed3cb-utilities\") pod \"certified-operators-l29mr\" (UID: \"e9a7711c-a7c2-40f4-953f-d83eee1ed3cb\") " pod="openshift-marketplace/certified-operators-l29mr" Feb 18 01:38:59 crc kubenswrapper[4791]: I0218 01:38:59.360956 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wgn6k\" (UniqueName: \"kubernetes.io/projected/e9a7711c-a7c2-40f4-953f-d83eee1ed3cb-kube-api-access-wgn6k\") pod \"certified-operators-l29mr\" (UID: \"e9a7711c-a7c2-40f4-953f-d83eee1ed3cb\") " pod="openshift-marketplace/certified-operators-l29mr" Feb 18 01:38:59 crc kubenswrapper[4791]: I0218 01:38:59.463863 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e9a7711c-a7c2-40f4-953f-d83eee1ed3cb-catalog-content\") pod \"certified-operators-l29mr\" (UID: \"e9a7711c-a7c2-40f4-953f-d83eee1ed3cb\") " pod="openshift-marketplace/certified-operators-l29mr" Feb 18 01:38:59 crc kubenswrapper[4791]: I0218 01:38:59.463971 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e9a7711c-a7c2-40f4-953f-d83eee1ed3cb-utilities\") pod \"certified-operators-l29mr\" (UID: \"e9a7711c-a7c2-40f4-953f-d83eee1ed3cb\") " pod="openshift-marketplace/certified-operators-l29mr" Feb 18 01:38:59 crc kubenswrapper[4791]: I0218 01:38:59.464030 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wgn6k\" (UniqueName: \"kubernetes.io/projected/e9a7711c-a7c2-40f4-953f-d83eee1ed3cb-kube-api-access-wgn6k\") pod \"certified-operators-l29mr\" (UID: \"e9a7711c-a7c2-40f4-953f-d83eee1ed3cb\") " pod="openshift-marketplace/certified-operators-l29mr" Feb 18 01:38:59 crc kubenswrapper[4791]: I0218 01:38:59.464912 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e9a7711c-a7c2-40f4-953f-d83eee1ed3cb-catalog-content\") pod \"certified-operators-l29mr\" (UID: \"e9a7711c-a7c2-40f4-953f-d83eee1ed3cb\") " pod="openshift-marketplace/certified-operators-l29mr" Feb 18 01:38:59 crc kubenswrapper[4791]: I0218 01:38:59.465073 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e9a7711c-a7c2-40f4-953f-d83eee1ed3cb-utilities\") pod 
\"certified-operators-l29mr\" (UID: \"e9a7711c-a7c2-40f4-953f-d83eee1ed3cb\") " pod="openshift-marketplace/certified-operators-l29mr" Feb 18 01:38:59 crc kubenswrapper[4791]: I0218 01:38:59.509685 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wgn6k\" (UniqueName: \"kubernetes.io/projected/e9a7711c-a7c2-40f4-953f-d83eee1ed3cb-kube-api-access-wgn6k\") pod \"certified-operators-l29mr\" (UID: \"e9a7711c-a7c2-40f4-953f-d83eee1ed3cb\") " pod="openshift-marketplace/certified-operators-l29mr" Feb 18 01:38:59 crc kubenswrapper[4791]: I0218 01:38:59.614823 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l29mr" Feb 18 01:39:00 crc kubenswrapper[4791]: W0218 01:39:00.174318 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode9a7711c_a7c2_40f4_953f_d83eee1ed3cb.slice/crio-b4d6567c620bf8844e17379e6f8959243e217ee5504cfca9305c1781de76c4f6 WatchSource:0}: Error finding container b4d6567c620bf8844e17379e6f8959243e217ee5504cfca9305c1781de76c4f6: Status 404 returned error can't find the container with id b4d6567c620bf8844e17379e6f8959243e217ee5504cfca9305c1781de76c4f6 Feb 18 01:39:00 crc kubenswrapper[4791]: I0218 01:39:00.175756 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-l29mr"] Feb 18 01:39:00 crc kubenswrapper[4791]: I0218 01:39:00.998127 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l29mr" event={"ID":"e9a7711c-a7c2-40f4-953f-d83eee1ed3cb","Type":"ContainerStarted","Data":"b4d6567c620bf8844e17379e6f8959243e217ee5504cfca9305c1781de76c4f6"} Feb 18 01:39:02 crc kubenswrapper[4791]: I0218 01:39:02.011029 4791 generic.go:334] "Generic (PLEG): container finished" podID="e9a7711c-a7c2-40f4-953f-d83eee1ed3cb" containerID="09532e4c1d4d6675ddfc3a810aba1b18864425b03a969643376baaaf0c802a0c" exitCode=0 Feb 18 01:39:02 crc kubenswrapper[4791]: I0218 01:39:02.011129 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l29mr" event={"ID":"e9a7711c-a7c2-40f4-953f-d83eee1ed3cb","Type":"ContainerDied","Data":"09532e4c1d4d6675ddfc3a810aba1b18864425b03a969643376baaaf0c802a0c"} Feb 18 01:39:03 crc kubenswrapper[4791]: I0218 01:39:03.038836 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l29mr" event={"ID":"e9a7711c-a7c2-40f4-953f-d83eee1ed3cb","Type":"ContainerStarted","Data":"64b5208afd3d53ab51f9993aabb3e2d1b6a30eced4e20486587972289c27c575"} Feb 18 01:39:03 crc kubenswrapper[4791]: E0218 01:39:03.062500 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:39:05 crc kubenswrapper[4791]: I0218 01:39:05.059990 4791 generic.go:334] "Generic (PLEG): container finished" podID="e9a7711c-a7c2-40f4-953f-d83eee1ed3cb" containerID="64b5208afd3d53ab51f9993aabb3e2d1b6a30eced4e20486587972289c27c575" exitCode=0 Feb 18 01:39:05 crc kubenswrapper[4791]: I0218 01:39:05.060067 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l29mr" 
event={"ID":"e9a7711c-a7c2-40f4-953f-d83eee1ed3cb","Type":"ContainerDied","Data":"64b5208afd3d53ab51f9993aabb3e2d1b6a30eced4e20486587972289c27c575"} Feb 18 01:39:05 crc kubenswrapper[4791]: E0218 01:39:05.065657 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:39:06 crc kubenswrapper[4791]: I0218 01:39:06.076184 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l29mr" event={"ID":"e9a7711c-a7c2-40f4-953f-d83eee1ed3cb","Type":"ContainerStarted","Data":"0b342aafb34c9a6e9bf8ee245a035a7d8fc66b07bf8466451b9723048298b793"} Feb 18 01:39:06 crc kubenswrapper[4791]: I0218 01:39:06.099623 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-l29mr" podStartSLOduration=3.655731433 podStartE2EDuration="7.09960023s" podCreationTimestamp="2026-02-18 01:38:59 +0000 UTC" firstStartedPulling="2026-02-18 01:39:02.01398109 +0000 UTC m=+3883.581994270" lastFinishedPulling="2026-02-18 01:39:05.457849897 +0000 UTC m=+3887.025863067" observedRunningTime="2026-02-18 01:39:06.093498744 +0000 UTC m=+3887.661511924" watchObservedRunningTime="2026-02-18 01:39:06.09960023 +0000 UTC m=+3887.667613410" Feb 18 01:39:09 crc kubenswrapper[4791]: I0218 01:39:09.071904 4791 scope.go:117] "RemoveContainer" containerID="0c66914ba8ca3f5ddbabe2813fa17e382663af710ab17028ec8f40608d73bea9" Feb 18 01:39:09 crc kubenswrapper[4791]: E0218 01:39:09.072587 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:39:09 crc kubenswrapper[4791]: I0218 01:39:09.615668 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-l29mr" Feb 18 01:39:09 crc kubenswrapper[4791]: I0218 01:39:09.615731 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-l29mr" Feb 18 01:39:09 crc kubenswrapper[4791]: I0218 01:39:09.690148 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-l29mr" Feb 18 01:39:10 crc kubenswrapper[4791]: I0218 01:39:10.187675 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-l29mr" Feb 18 01:39:10 crc kubenswrapper[4791]: I0218 01:39:10.259467 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-l29mr"] Feb 18 01:39:12 crc kubenswrapper[4791]: I0218 01:39:12.140335 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-l29mr" podUID="e9a7711c-a7c2-40f4-953f-d83eee1ed3cb" containerName="registry-server" containerID="cri-o://0b342aafb34c9a6e9bf8ee245a035a7d8fc66b07bf8466451b9723048298b793" gracePeriod=2 Feb 18 01:39:13 crc kubenswrapper[4791]: I0218 01:39:13.152268 
4791 generic.go:334] "Generic (PLEG): container finished" podID="e9a7711c-a7c2-40f4-953f-d83eee1ed3cb" containerID="0b342aafb34c9a6e9bf8ee245a035a7d8fc66b07bf8466451b9723048298b793" exitCode=0 Feb 18 01:39:13 crc kubenswrapper[4791]: I0218 01:39:13.152566 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l29mr" event={"ID":"e9a7711c-a7c2-40f4-953f-d83eee1ed3cb","Type":"ContainerDied","Data":"0b342aafb34c9a6e9bf8ee245a035a7d8fc66b07bf8466451b9723048298b793"} Feb 18 01:39:13 crc kubenswrapper[4791]: I0218 01:39:13.361126 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l29mr" Feb 18 01:39:13 crc kubenswrapper[4791]: I0218 01:39:13.469535 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wgn6k\" (UniqueName: \"kubernetes.io/projected/e9a7711c-a7c2-40f4-953f-d83eee1ed3cb-kube-api-access-wgn6k\") pod \"e9a7711c-a7c2-40f4-953f-d83eee1ed3cb\" (UID: \"e9a7711c-a7c2-40f4-953f-d83eee1ed3cb\") " Feb 18 01:39:13 crc kubenswrapper[4791]: I0218 01:39:13.469724 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e9a7711c-a7c2-40f4-953f-d83eee1ed3cb-catalog-content\") pod \"e9a7711c-a7c2-40f4-953f-d83eee1ed3cb\" (UID: \"e9a7711c-a7c2-40f4-953f-d83eee1ed3cb\") " Feb 18 01:39:13 crc kubenswrapper[4791]: I0218 01:39:13.469834 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e9a7711c-a7c2-40f4-953f-d83eee1ed3cb-utilities\") pod \"e9a7711c-a7c2-40f4-953f-d83eee1ed3cb\" (UID: \"e9a7711c-a7c2-40f4-953f-d83eee1ed3cb\") " Feb 18 01:39:13 crc kubenswrapper[4791]: I0218 01:39:13.470600 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e9a7711c-a7c2-40f4-953f-d83eee1ed3cb-utilities" (OuterVolumeSpecName: "utilities") pod "e9a7711c-a7c2-40f4-953f-d83eee1ed3cb" (UID: "e9a7711c-a7c2-40f4-953f-d83eee1ed3cb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:39:13 crc kubenswrapper[4791]: I0218 01:39:13.476952 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e9a7711c-a7c2-40f4-953f-d83eee1ed3cb-kube-api-access-wgn6k" (OuterVolumeSpecName: "kube-api-access-wgn6k") pod "e9a7711c-a7c2-40f4-953f-d83eee1ed3cb" (UID: "e9a7711c-a7c2-40f4-953f-d83eee1ed3cb"). InnerVolumeSpecName "kube-api-access-wgn6k". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:39:13 crc kubenswrapper[4791]: I0218 01:39:13.572884 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e9a7711c-a7c2-40f4-953f-d83eee1ed3cb-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 01:39:13 crc kubenswrapper[4791]: I0218 01:39:13.572915 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wgn6k\" (UniqueName: \"kubernetes.io/projected/e9a7711c-a7c2-40f4-953f-d83eee1ed3cb-kube-api-access-wgn6k\") on node \"crc\" DevicePath \"\"" Feb 18 01:39:13 crc kubenswrapper[4791]: I0218 01:39:13.913792 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e9a7711c-a7c2-40f4-953f-d83eee1ed3cb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e9a7711c-a7c2-40f4-953f-d83eee1ed3cb" (UID: "e9a7711c-a7c2-40f4-953f-d83eee1ed3cb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:39:13 crc kubenswrapper[4791]: I0218 01:39:13.982180 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e9a7711c-a7c2-40f4-953f-d83eee1ed3cb-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 01:39:14 crc kubenswrapper[4791]: I0218 01:39:14.163293 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l29mr" event={"ID":"e9a7711c-a7c2-40f4-953f-d83eee1ed3cb","Type":"ContainerDied","Data":"b4d6567c620bf8844e17379e6f8959243e217ee5504cfca9305c1781de76c4f6"} Feb 18 01:39:14 crc kubenswrapper[4791]: I0218 01:39:14.163383 4791 scope.go:117] "RemoveContainer" containerID="0b342aafb34c9a6e9bf8ee245a035a7d8fc66b07bf8466451b9723048298b793" Feb 18 01:39:14 crc kubenswrapper[4791]: I0218 01:39:14.163441 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-l29mr" Feb 18 01:39:14 crc kubenswrapper[4791]: I0218 01:39:14.186315 4791 scope.go:117] "RemoveContainer" containerID="64b5208afd3d53ab51f9993aabb3e2d1b6a30eced4e20486587972289c27c575" Feb 18 01:39:14 crc kubenswrapper[4791]: I0218 01:39:14.205290 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-l29mr"] Feb 18 01:39:14 crc kubenswrapper[4791]: I0218 01:39:14.244241 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-l29mr"] Feb 18 01:39:14 crc kubenswrapper[4791]: I0218 01:39:14.713550 4791 scope.go:117] "RemoveContainer" containerID="09532e4c1d4d6675ddfc3a810aba1b18864425b03a969643376baaaf0c802a0c" Feb 18 01:39:15 crc kubenswrapper[4791]: I0218 01:39:15.077921 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e9a7711c-a7c2-40f4-953f-d83eee1ed3cb" path="/var/lib/kubelet/pods/e9a7711c-a7c2-40f4-953f-d83eee1ed3cb/volumes" Feb 18 01:39:16 crc kubenswrapper[4791]: E0218 01:39:16.063376 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:39:20 crc kubenswrapper[4791]: I0218 01:39:20.061685 4791 scope.go:117] "RemoveContainer" containerID="0c66914ba8ca3f5ddbabe2813fa17e382663af710ab17028ec8f40608d73bea9" Feb 18 01:39:20 crc kubenswrapper[4791]: E0218 01:39:20.062923 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:39:20 crc kubenswrapper[4791]: E0218 01:39:20.064484 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:39:29 crc kubenswrapper[4791]: E0218 01:39:29.075974 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:39:31 crc kubenswrapper[4791]: I0218 01:39:31.063691 4791 scope.go:117] "RemoveContainer" containerID="0c66914ba8ca3f5ddbabe2813fa17e382663af710ab17028ec8f40608d73bea9" Feb 18 01:39:31 crc kubenswrapper[4791]: E0218 01:39:31.065267 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" 
podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:39:33 crc kubenswrapper[4791]: E0218 01:39:33.064545 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:39:42 crc kubenswrapper[4791]: E0218 01:39:42.064260 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:39:44 crc kubenswrapper[4791]: I0218 01:39:44.062436 4791 scope.go:117] "RemoveContainer" containerID="0c66914ba8ca3f5ddbabe2813fa17e382663af710ab17028ec8f40608d73bea9" Feb 18 01:39:44 crc kubenswrapper[4791]: E0218 01:39:44.063569 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:39:45 crc kubenswrapper[4791]: E0218 01:39:45.065233 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:39:48 crc kubenswrapper[4791]: I0218 01:39:48.860034 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-x5c9v"] Feb 18 01:39:48 crc kubenswrapper[4791]: E0218 01:39:48.867209 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9a7711c-a7c2-40f4-953f-d83eee1ed3cb" containerName="extract-content" Feb 18 01:39:48 crc kubenswrapper[4791]: I0218 01:39:48.867324 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9a7711c-a7c2-40f4-953f-d83eee1ed3cb" containerName="extract-content" Feb 18 01:39:48 crc kubenswrapper[4791]: E0218 01:39:48.867418 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9a7711c-a7c2-40f4-953f-d83eee1ed3cb" containerName="registry-server" Feb 18 01:39:48 crc kubenswrapper[4791]: I0218 01:39:48.867496 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9a7711c-a7c2-40f4-953f-d83eee1ed3cb" containerName="registry-server" Feb 18 01:39:48 crc kubenswrapper[4791]: E0218 01:39:48.867582 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9a7711c-a7c2-40f4-953f-d83eee1ed3cb" containerName="extract-utilities" Feb 18 01:39:48 crc kubenswrapper[4791]: I0218 01:39:48.867657 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9a7711c-a7c2-40f4-953f-d83eee1ed3cb" containerName="extract-utilities" Feb 18 01:39:48 crc kubenswrapper[4791]: I0218 01:39:48.868063 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9a7711c-a7c2-40f4-953f-d83eee1ed3cb" containerName="registry-server" Feb 18 01:39:48 crc kubenswrapper[4791]: I0218 
01:39:48.870491 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-x5c9v" Feb 18 01:39:48 crc kubenswrapper[4791]: I0218 01:39:48.879336 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-x5c9v"] Feb 18 01:39:48 crc kubenswrapper[4791]: I0218 01:39:48.965927 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1-utilities\") pod \"redhat-operators-x5c9v\" (UID: \"45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1\") " pod="openshift-marketplace/redhat-operators-x5c9v" Feb 18 01:39:48 crc kubenswrapper[4791]: I0218 01:39:48.966054 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1-catalog-content\") pod \"redhat-operators-x5c9v\" (UID: \"45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1\") " pod="openshift-marketplace/redhat-operators-x5c9v" Feb 18 01:39:48 crc kubenswrapper[4791]: I0218 01:39:48.966152 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f79lf\" (UniqueName: \"kubernetes.io/projected/45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1-kube-api-access-f79lf\") pod \"redhat-operators-x5c9v\" (UID: \"45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1\") " pod="openshift-marketplace/redhat-operators-x5c9v" Feb 18 01:39:49 crc kubenswrapper[4791]: I0218 01:39:49.067675 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f79lf\" (UniqueName: \"kubernetes.io/projected/45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1-kube-api-access-f79lf\") pod \"redhat-operators-x5c9v\" (UID: \"45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1\") " pod="openshift-marketplace/redhat-operators-x5c9v" Feb 18 01:39:49 crc kubenswrapper[4791]: I0218 01:39:49.067765 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1-utilities\") pod \"redhat-operators-x5c9v\" (UID: \"45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1\") " pod="openshift-marketplace/redhat-operators-x5c9v" Feb 18 01:39:49 crc kubenswrapper[4791]: I0218 01:39:49.067855 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1-catalog-content\") pod \"redhat-operators-x5c9v\" (UID: \"45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1\") " pod="openshift-marketplace/redhat-operators-x5c9v" Feb 18 01:39:49 crc kubenswrapper[4791]: I0218 01:39:49.068347 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1-catalog-content\") pod \"redhat-operators-x5c9v\" (UID: \"45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1\") " pod="openshift-marketplace/redhat-operators-x5c9v" Feb 18 01:39:49 crc kubenswrapper[4791]: I0218 01:39:49.068569 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1-utilities\") pod \"redhat-operators-x5c9v\" (UID: \"45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1\") " pod="openshift-marketplace/redhat-operators-x5c9v" Feb 18 01:39:49 crc kubenswrapper[4791]: I0218 01:39:49.101233 4791 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f79lf\" (UniqueName: \"kubernetes.io/projected/45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1-kube-api-access-f79lf\") pod \"redhat-operators-x5c9v\" (UID: \"45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1\") " pod="openshift-marketplace/redhat-operators-x5c9v" Feb 18 01:39:49 crc kubenswrapper[4791]: I0218 01:39:49.205443 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-x5c9v" Feb 18 01:39:49 crc kubenswrapper[4791]: I0218 01:39:49.771617 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-x5c9v"] Feb 18 01:39:49 crc kubenswrapper[4791]: W0218 01:39:49.773729 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod45e1684d_3ec9_42ac_9e7b_71aa8e1d9eb1.slice/crio-d63a34ba1db0a92077e5a936f626357ef2d76680d17a5cdd0c9fd03a16d77696 WatchSource:0}: Error finding container d63a34ba1db0a92077e5a936f626357ef2d76680d17a5cdd0c9fd03a16d77696: Status 404 returned error can't find the container with id d63a34ba1db0a92077e5a936f626357ef2d76680d17a5cdd0c9fd03a16d77696 Feb 18 01:39:50 crc kubenswrapper[4791]: I0218 01:39:50.570364 4791 generic.go:334] "Generic (PLEG): container finished" podID="45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1" containerID="c89f89aa8d13d1c7b09e3efb70e13ba70b9f3a9ff6d9b40346d3f4f980888d0c" exitCode=0 Feb 18 01:39:50 crc kubenswrapper[4791]: I0218 01:39:50.570420 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x5c9v" event={"ID":"45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1","Type":"ContainerDied","Data":"c89f89aa8d13d1c7b09e3efb70e13ba70b9f3a9ff6d9b40346d3f4f980888d0c"} Feb 18 01:39:50 crc kubenswrapper[4791]: I0218 01:39:50.570465 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x5c9v" event={"ID":"45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1","Type":"ContainerStarted","Data":"d63a34ba1db0a92077e5a936f626357ef2d76680d17a5cdd0c9fd03a16d77696"} Feb 18 01:39:51 crc kubenswrapper[4791]: I0218 01:39:51.580701 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x5c9v" event={"ID":"45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1","Type":"ContainerStarted","Data":"602dc511c4dd781f1817a4a5913ac267c51cbea7071eba15e5e6e4a71d4d044d"} Feb 18 01:39:53 crc kubenswrapper[4791]: E0218 01:39:53.065207 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:39:55 crc kubenswrapper[4791]: I0218 01:39:55.619321 4791 generic.go:334] "Generic (PLEG): container finished" podID="4f1c775a-693d-40ae-b01c-00632b39e8b1" containerID="f04cf9832320978fb8fbfdb56ff1ee5860f580b6cd98adf958626685138c9c04" exitCode=2 Feb 18 01:39:55 crc kubenswrapper[4791]: I0218 01:39:55.619431 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-68jq5" event={"ID":"4f1c775a-693d-40ae-b01c-00632b39e8b1","Type":"ContainerDied","Data":"f04cf9832320978fb8fbfdb56ff1ee5860f580b6cd98adf958626685138c9c04"} Feb 18 01:39:56 crc kubenswrapper[4791]: E0218 01:39:56.064127 4791 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:39:57 crc kubenswrapper[4791]: I0218 01:39:57.065437 4791 scope.go:117] "RemoveContainer" containerID="0c66914ba8ca3f5ddbabe2813fa17e382663af710ab17028ec8f40608d73bea9" Feb 18 01:39:57 crc kubenswrapper[4791]: E0218 01:39:57.086284 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:39:57 crc kubenswrapper[4791]: I0218 01:39:57.152328 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-68jq5" Feb 18 01:39:57 crc kubenswrapper[4791]: I0218 01:39:57.293407 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4f1c775a-693d-40ae-b01c-00632b39e8b1-ssh-key-openstack-edpm-ipam\") pod \"4f1c775a-693d-40ae-b01c-00632b39e8b1\" (UID: \"4f1c775a-693d-40ae-b01c-00632b39e8b1\") " Feb 18 01:39:57 crc kubenswrapper[4791]: I0218 01:39:57.293517 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4f1c775a-693d-40ae-b01c-00632b39e8b1-inventory\") pod \"4f1c775a-693d-40ae-b01c-00632b39e8b1\" (UID: \"4f1c775a-693d-40ae-b01c-00632b39e8b1\") " Feb 18 01:39:57 crc kubenswrapper[4791]: I0218 01:39:57.293813 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qfsp8\" (UniqueName: \"kubernetes.io/projected/4f1c775a-693d-40ae-b01c-00632b39e8b1-kube-api-access-qfsp8\") pod \"4f1c775a-693d-40ae-b01c-00632b39e8b1\" (UID: \"4f1c775a-693d-40ae-b01c-00632b39e8b1\") " Feb 18 01:39:57 crc kubenswrapper[4791]: I0218 01:39:57.299917 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f1c775a-693d-40ae-b01c-00632b39e8b1-kube-api-access-qfsp8" (OuterVolumeSpecName: "kube-api-access-qfsp8") pod "4f1c775a-693d-40ae-b01c-00632b39e8b1" (UID: "4f1c775a-693d-40ae-b01c-00632b39e8b1"). InnerVolumeSpecName "kube-api-access-qfsp8". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:39:57 crc kubenswrapper[4791]: I0218 01:39:57.332162 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f1c775a-693d-40ae-b01c-00632b39e8b1-inventory" (OuterVolumeSpecName: "inventory") pod "4f1c775a-693d-40ae-b01c-00632b39e8b1" (UID: "4f1c775a-693d-40ae-b01c-00632b39e8b1"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:39:57 crc kubenswrapper[4791]: I0218 01:39:57.336880 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f1c775a-693d-40ae-b01c-00632b39e8b1-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "4f1c775a-693d-40ae-b01c-00632b39e8b1" (UID: "4f1c775a-693d-40ae-b01c-00632b39e8b1"). 
InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:39:57 crc kubenswrapper[4791]: I0218 01:39:57.397036 4791 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4f1c775a-693d-40ae-b01c-00632b39e8b1-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Feb 18 01:39:57 crc kubenswrapper[4791]: I0218 01:39:57.397078 4791 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4f1c775a-693d-40ae-b01c-00632b39e8b1-inventory\") on node \"crc\" DevicePath \"\"" Feb 18 01:39:57 crc kubenswrapper[4791]: I0218 01:39:57.397105 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qfsp8\" (UniqueName: \"kubernetes.io/projected/4f1c775a-693d-40ae-b01c-00632b39e8b1-kube-api-access-qfsp8\") on node \"crc\" DevicePath \"\"" Feb 18 01:39:57 crc kubenswrapper[4791]: I0218 01:39:57.646767 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-68jq5" Feb 18 01:39:57 crc kubenswrapper[4791]: I0218 01:39:57.646779 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-68jq5" event={"ID":"4f1c775a-693d-40ae-b01c-00632b39e8b1","Type":"ContainerDied","Data":"de670f2cd12f94d25c1349fa1705952083de5dfabeb9f9141aa9954e527dcb10"} Feb 18 01:39:57 crc kubenswrapper[4791]: I0218 01:39:57.647104 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="de670f2cd12f94d25c1349fa1705952083de5dfabeb9f9141aa9954e527dcb10" Feb 18 01:39:57 crc kubenswrapper[4791]: I0218 01:39:57.648957 4791 generic.go:334] "Generic (PLEG): container finished" podID="45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1" containerID="602dc511c4dd781f1817a4a5913ac267c51cbea7071eba15e5e6e4a71d4d044d" exitCode=0 Feb 18 01:39:57 crc kubenswrapper[4791]: I0218 01:39:57.649010 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x5c9v" event={"ID":"45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1","Type":"ContainerDied","Data":"602dc511c4dd781f1817a4a5913ac267c51cbea7071eba15e5e6e4a71d4d044d"} Feb 18 01:39:58 crc kubenswrapper[4791]: I0218 01:39:58.659736 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x5c9v" event={"ID":"45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1","Type":"ContainerStarted","Data":"ece6f24e0331cb4ef3f6f931a0ceffe2ad31f505a163d05ccce2b9d393d437a1"} Feb 18 01:39:58 crc kubenswrapper[4791]: I0218 01:39:58.677787 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-x5c9v" podStartSLOduration=3.133542689 podStartE2EDuration="10.677768486s" podCreationTimestamp="2026-02-18 01:39:48 +0000 UTC" firstStartedPulling="2026-02-18 01:39:50.572629913 +0000 UTC m=+3932.140643083" lastFinishedPulling="2026-02-18 01:39:58.11685571 +0000 UTC m=+3939.684868880" observedRunningTime="2026-02-18 01:39:58.673899518 +0000 UTC m=+3940.241912688" watchObservedRunningTime="2026-02-18 01:39:58.677768486 +0000 UTC m=+3940.245781656" Feb 18 01:39:59 crc kubenswrapper[4791]: I0218 01:39:59.206596 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-x5c9v" Feb 18 01:39:59 crc kubenswrapper[4791]: I0218 01:39:59.206886 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openshift-marketplace/redhat-operators-x5c9v" Feb 18 01:40:00 crc kubenswrapper[4791]: I0218 01:40:00.268538 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-x5c9v" podUID="45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1" containerName="registry-server" probeResult="failure" output=< Feb 18 01:40:00 crc kubenswrapper[4791]: timeout: failed to connect service ":50051" within 1s Feb 18 01:40:00 crc kubenswrapper[4791]: > Feb 18 01:40:08 crc kubenswrapper[4791]: E0218 01:40:08.067482 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:40:09 crc kubenswrapper[4791]: I0218 01:40:09.077489 4791 scope.go:117] "RemoveContainer" containerID="0c66914ba8ca3f5ddbabe2813fa17e382663af710ab17028ec8f40608d73bea9" Feb 18 01:40:09 crc kubenswrapper[4791]: E0218 01:40:09.080000 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:40:09 crc kubenswrapper[4791]: E0218 01:40:09.084685 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:40:09 crc kubenswrapper[4791]: I0218 01:40:09.258102 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-x5c9v" Feb 18 01:40:09 crc kubenswrapper[4791]: I0218 01:40:09.313676 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-x5c9v" Feb 18 01:40:09 crc kubenswrapper[4791]: I0218 01:40:09.501330 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-x5c9v"] Feb 18 01:40:10 crc kubenswrapper[4791]: I0218 01:40:10.778982 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-x5c9v" podUID="45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1" containerName="registry-server" containerID="cri-o://ece6f24e0331cb4ef3f6f931a0ceffe2ad31f505a163d05ccce2b9d393d437a1" gracePeriod=2 Feb 18 01:40:11 crc kubenswrapper[4791]: I0218 01:40:11.587319 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-x5c9v" Feb 18 01:40:11 crc kubenswrapper[4791]: I0218 01:40:11.754097 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1-catalog-content\") pod \"45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1\" (UID: \"45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1\") " Feb 18 01:40:11 crc kubenswrapper[4791]: I0218 01:40:11.754290 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f79lf\" (UniqueName: \"kubernetes.io/projected/45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1-kube-api-access-f79lf\") pod \"45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1\" (UID: \"45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1\") " Feb 18 01:40:11 crc kubenswrapper[4791]: I0218 01:40:11.754414 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1-utilities\") pod \"45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1\" (UID: \"45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1\") " Feb 18 01:40:11 crc kubenswrapper[4791]: I0218 01:40:11.755264 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1-utilities" (OuterVolumeSpecName: "utilities") pod "45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1" (UID: "45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:40:11 crc kubenswrapper[4791]: I0218 01:40:11.755406 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 01:40:11 crc kubenswrapper[4791]: I0218 01:40:11.763642 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1-kube-api-access-f79lf" (OuterVolumeSpecName: "kube-api-access-f79lf") pod "45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1" (UID: "45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1"). InnerVolumeSpecName "kube-api-access-f79lf". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:40:11 crc kubenswrapper[4791]: I0218 01:40:11.792827 4791 generic.go:334] "Generic (PLEG): container finished" podID="45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1" containerID="ece6f24e0331cb4ef3f6f931a0ceffe2ad31f505a163d05ccce2b9d393d437a1" exitCode=0 Feb 18 01:40:11 crc kubenswrapper[4791]: I0218 01:40:11.792872 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x5c9v" event={"ID":"45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1","Type":"ContainerDied","Data":"ece6f24e0331cb4ef3f6f931a0ceffe2ad31f505a163d05ccce2b9d393d437a1"} Feb 18 01:40:11 crc kubenswrapper[4791]: I0218 01:40:11.792899 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x5c9v" event={"ID":"45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1","Type":"ContainerDied","Data":"d63a34ba1db0a92077e5a936f626357ef2d76680d17a5cdd0c9fd03a16d77696"} Feb 18 01:40:11 crc kubenswrapper[4791]: I0218 01:40:11.792916 4791 scope.go:117] "RemoveContainer" containerID="ece6f24e0331cb4ef3f6f931a0ceffe2ad31f505a163d05ccce2b9d393d437a1" Feb 18 01:40:11 crc kubenswrapper[4791]: I0218 01:40:11.794063 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-x5c9v" Feb 18 01:40:11 crc kubenswrapper[4791]: I0218 01:40:11.841906 4791 scope.go:117] "RemoveContainer" containerID="602dc511c4dd781f1817a4a5913ac267c51cbea7071eba15e5e6e4a71d4d044d" Feb 18 01:40:11 crc kubenswrapper[4791]: I0218 01:40:11.864861 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f79lf\" (UniqueName: \"kubernetes.io/projected/45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1-kube-api-access-f79lf\") on node \"crc\" DevicePath \"\"" Feb 18 01:40:11 crc kubenswrapper[4791]: I0218 01:40:11.886077 4791 scope.go:117] "RemoveContainer" containerID="c89f89aa8d13d1c7b09e3efb70e13ba70b9f3a9ff6d9b40346d3f4f980888d0c" Feb 18 01:40:11 crc kubenswrapper[4791]: I0218 01:40:11.895033 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1" (UID: "45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:40:11 crc kubenswrapper[4791]: I0218 01:40:11.943452 4791 scope.go:117] "RemoveContainer" containerID="ece6f24e0331cb4ef3f6f931a0ceffe2ad31f505a163d05ccce2b9d393d437a1" Feb 18 01:40:11 crc kubenswrapper[4791]: E0218 01:40:11.943878 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ece6f24e0331cb4ef3f6f931a0ceffe2ad31f505a163d05ccce2b9d393d437a1\": container with ID starting with ece6f24e0331cb4ef3f6f931a0ceffe2ad31f505a163d05ccce2b9d393d437a1 not found: ID does not exist" containerID="ece6f24e0331cb4ef3f6f931a0ceffe2ad31f505a163d05ccce2b9d393d437a1" Feb 18 01:40:11 crc kubenswrapper[4791]: I0218 01:40:11.943906 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ece6f24e0331cb4ef3f6f931a0ceffe2ad31f505a163d05ccce2b9d393d437a1"} err="failed to get container status \"ece6f24e0331cb4ef3f6f931a0ceffe2ad31f505a163d05ccce2b9d393d437a1\": rpc error: code = NotFound desc = could not find container \"ece6f24e0331cb4ef3f6f931a0ceffe2ad31f505a163d05ccce2b9d393d437a1\": container with ID starting with ece6f24e0331cb4ef3f6f931a0ceffe2ad31f505a163d05ccce2b9d393d437a1 not found: ID does not exist" Feb 18 01:40:11 crc kubenswrapper[4791]: I0218 01:40:11.943925 4791 scope.go:117] "RemoveContainer" containerID="602dc511c4dd781f1817a4a5913ac267c51cbea7071eba15e5e6e4a71d4d044d" Feb 18 01:40:11 crc kubenswrapper[4791]: E0218 01:40:11.944232 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"602dc511c4dd781f1817a4a5913ac267c51cbea7071eba15e5e6e4a71d4d044d\": container with ID starting with 602dc511c4dd781f1817a4a5913ac267c51cbea7071eba15e5e6e4a71d4d044d not found: ID does not exist" containerID="602dc511c4dd781f1817a4a5913ac267c51cbea7071eba15e5e6e4a71d4d044d" Feb 18 01:40:11 crc kubenswrapper[4791]: I0218 01:40:11.944279 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"602dc511c4dd781f1817a4a5913ac267c51cbea7071eba15e5e6e4a71d4d044d"} err="failed to get container status \"602dc511c4dd781f1817a4a5913ac267c51cbea7071eba15e5e6e4a71d4d044d\": rpc error: code = NotFound desc = could not find container \"602dc511c4dd781f1817a4a5913ac267c51cbea7071eba15e5e6e4a71d4d044d\": container with ID starting with 
602dc511c4dd781f1817a4a5913ac267c51cbea7071eba15e5e6e4a71d4d044d not found: ID does not exist" Feb 18 01:40:11 crc kubenswrapper[4791]: I0218 01:40:11.944311 4791 scope.go:117] "RemoveContainer" containerID="c89f89aa8d13d1c7b09e3efb70e13ba70b9f3a9ff6d9b40346d3f4f980888d0c" Feb 18 01:40:11 crc kubenswrapper[4791]: E0218 01:40:11.944680 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c89f89aa8d13d1c7b09e3efb70e13ba70b9f3a9ff6d9b40346d3f4f980888d0c\": container with ID starting with c89f89aa8d13d1c7b09e3efb70e13ba70b9f3a9ff6d9b40346d3f4f980888d0c not found: ID does not exist" containerID="c89f89aa8d13d1c7b09e3efb70e13ba70b9f3a9ff6d9b40346d3f4f980888d0c" Feb 18 01:40:11 crc kubenswrapper[4791]: I0218 01:40:11.944706 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c89f89aa8d13d1c7b09e3efb70e13ba70b9f3a9ff6d9b40346d3f4f980888d0c"} err="failed to get container status \"c89f89aa8d13d1c7b09e3efb70e13ba70b9f3a9ff6d9b40346d3f4f980888d0c\": rpc error: code = NotFound desc = could not find container \"c89f89aa8d13d1c7b09e3efb70e13ba70b9f3a9ff6d9b40346d3f4f980888d0c\": container with ID starting with c89f89aa8d13d1c7b09e3efb70e13ba70b9f3a9ff6d9b40346d3f4f980888d0c not found: ID does not exist" Feb 18 01:40:11 crc kubenswrapper[4791]: I0218 01:40:11.967448 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 01:40:12 crc kubenswrapper[4791]: I0218 01:40:12.159053 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-x5c9v"] Feb 18 01:40:12 crc kubenswrapper[4791]: I0218 01:40:12.169508 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-x5c9v"] Feb 18 01:40:13 crc kubenswrapper[4791]: I0218 01:40:13.079877 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1" path="/var/lib/kubelet/pods/45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1/volumes" Feb 18 01:40:22 crc kubenswrapper[4791]: I0218 01:40:22.061459 4791 scope.go:117] "RemoveContainer" containerID="0c66914ba8ca3f5ddbabe2813fa17e382663af710ab17028ec8f40608d73bea9" Feb 18 01:40:22 crc kubenswrapper[4791]: E0218 01:40:22.062305 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:40:22 crc kubenswrapper[4791]: E0218 01:40:22.063626 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:40:24 crc kubenswrapper[4791]: E0218 01:40:24.063874 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:40:34 crc kubenswrapper[4791]: I0218 01:40:34.171020 4791 scope.go:117] "RemoveContainer" containerID="0842375efd0a97dfad118cbd9b8c6898a9a1eb040d2efc09e6598eaa735dba33" Feb 18 01:40:34 crc kubenswrapper[4791]: I0218 01:40:34.194634 4791 scope.go:117] "RemoveContainer" containerID="5de73960c29ee89358f3d8a74accb769431f4f55d9af7aa77471430467c7aefd" Feb 18 01:40:34 crc kubenswrapper[4791]: I0218 01:40:34.269072 4791 scope.go:117] "RemoveContainer" containerID="c9e0916fd5d1dd9d5dfd006f91b0bc71f3d95bf39095d66371f5fb9559c9c40c" Feb 18 01:40:36 crc kubenswrapper[4791]: I0218 01:40:36.061396 4791 scope.go:117] "RemoveContainer" containerID="0c66914ba8ca3f5ddbabe2813fa17e382663af710ab17028ec8f40608d73bea9" Feb 18 01:40:36 crc kubenswrapper[4791]: E0218 01:40:36.061968 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:40:37 crc kubenswrapper[4791]: E0218 01:40:37.066644 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:40:37 crc kubenswrapper[4791]: E0218 01:40:37.066766 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:40:48 crc kubenswrapper[4791]: E0218 01:40:48.063066 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:40:50 crc kubenswrapper[4791]: I0218 01:40:50.062064 4791 scope.go:117] "RemoveContainer" containerID="0c66914ba8ca3f5ddbabe2813fa17e382663af710ab17028ec8f40608d73bea9" Feb 18 01:40:50 crc kubenswrapper[4791]: E0218 01:40:50.063806 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:40:50 crc kubenswrapper[4791]: E0218 01:40:50.064433 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:41:03 crc kubenswrapper[4791]: E0218 01:41:03.067840 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:41:04 crc kubenswrapper[4791]: E0218 01:41:04.063727 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:41:05 crc kubenswrapper[4791]: I0218 01:41:05.062267 4791 scope.go:117] "RemoveContainer" containerID="0c66914ba8ca3f5ddbabe2813fa17e382663af710ab17028ec8f40608d73bea9" Feb 18 01:41:05 crc kubenswrapper[4791]: E0218 01:41:05.062652 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:41:15 crc kubenswrapper[4791]: E0218 01:41:15.064130 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:41:17 crc kubenswrapper[4791]: I0218 01:41:17.062060 4791 scope.go:117] "RemoveContainer" containerID="0c66914ba8ca3f5ddbabe2813fa17e382663af710ab17028ec8f40608d73bea9" Feb 18 01:41:17 crc kubenswrapper[4791]: E0218 01:41:17.062753 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:41:18 crc kubenswrapper[4791]: E0218 01:41:18.063832 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:41:26 crc kubenswrapper[4791]: E0218 01:41:26.065515 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:41:29 
crc kubenswrapper[4791]: I0218 01:41:29.070686 4791 scope.go:117] "RemoveContainer" containerID="0c66914ba8ca3f5ddbabe2813fa17e382663af710ab17028ec8f40608d73bea9" Feb 18 01:41:29 crc kubenswrapper[4791]: E0218 01:41:29.071972 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:41:29 crc kubenswrapper[4791]: E0218 01:41:29.072150 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:41:38 crc kubenswrapper[4791]: E0218 01:41:38.064990 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:41:40 crc kubenswrapper[4791]: I0218 01:41:40.062222 4791 scope.go:117] "RemoveContainer" containerID="0c66914ba8ca3f5ddbabe2813fa17e382663af710ab17028ec8f40608d73bea9" Feb 18 01:41:40 crc kubenswrapper[4791]: E0218 01:41:40.063694 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:41:41 crc kubenswrapper[4791]: E0218 01:41:41.064500 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:41:51 crc kubenswrapper[4791]: I0218 01:41:51.061224 4791 scope.go:117] "RemoveContainer" containerID="0c66914ba8ca3f5ddbabe2813fa17e382663af710ab17028ec8f40608d73bea9" Feb 18 01:41:51 crc kubenswrapper[4791]: E0218 01:41:51.062100 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:41:53 crc kubenswrapper[4791]: E0218 01:41:53.065338 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" 
pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:41:56 crc kubenswrapper[4791]: E0218 01:41:56.064302 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:42:06 crc kubenswrapper[4791]: I0218 01:42:06.061268 4791 scope.go:117] "RemoveContainer" containerID="0c66914ba8ca3f5ddbabe2813fa17e382663af710ab17028ec8f40608d73bea9" Feb 18 01:42:06 crc kubenswrapper[4791]: E0218 01:42:06.062136 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:42:07 crc kubenswrapper[4791]: E0218 01:42:07.065078 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:42:07 crc kubenswrapper[4791]: E0218 01:42:07.065148 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:42:18 crc kubenswrapper[4791]: E0218 01:42:18.063533 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:42:19 crc kubenswrapper[4791]: I0218 01:42:19.072935 4791 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 18 01:42:19 crc kubenswrapper[4791]: E0218 01:42:19.194073 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 01:42:19 crc kubenswrapper[4791]: E0218 01:42:19.194133 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 01:42:19 crc kubenswrapper[4791]: E0218 01:42:19.194259 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lgn7d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-scphk_openstack(68e5e8d6-5771-4045-858a-4a39b2db99f9): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 18 01:42:19 crc kubenswrapper[4791]: E0218 01:42:19.195324 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:42:20 crc kubenswrapper[4791]: I0218 01:42:20.060811 4791 scope.go:117] "RemoveContainer" containerID="0c66914ba8ca3f5ddbabe2813fa17e382663af710ab17028ec8f40608d73bea9" Feb 18 01:42:20 crc kubenswrapper[4791]: E0218 01:42:20.061398 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:42:30 crc kubenswrapper[4791]: E0218 01:42:30.066954 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:42:34 crc kubenswrapper[4791]: E0218 01:42:34.063430 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:42:35 crc kubenswrapper[4791]: I0218 01:42:35.035333 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-pvgl7"] Feb 18 01:42:35 crc kubenswrapper[4791]: E0218 01:42:35.036052 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f1c775a-693d-40ae-b01c-00632b39e8b1" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Feb 18 01:42:35 crc kubenswrapper[4791]: I0218 01:42:35.036072 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f1c775a-693d-40ae-b01c-00632b39e8b1" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Feb 18 01:42:35 crc kubenswrapper[4791]: E0218 01:42:35.036087 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1" containerName="extract-content" Feb 18 01:42:35 crc kubenswrapper[4791]: I0218 01:42:35.036094 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1" containerName="extract-content" Feb 18 01:42:35 crc kubenswrapper[4791]: E0218 01:42:35.036114 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1" containerName="registry-server" Feb 18 01:42:35 crc kubenswrapper[4791]: I0218 01:42:35.036120 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1" containerName="registry-server" Feb 18 01:42:35 crc kubenswrapper[4791]: E0218 01:42:35.036176 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1" containerName="extract-utilities" Feb 18 01:42:35 crc kubenswrapper[4791]: I0218 01:42:35.036184 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1" containerName="extract-utilities" Feb 18 01:42:35 crc kubenswrapper[4791]: I0218 01:42:35.036373 4791 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="4f1c775a-693d-40ae-b01c-00632b39e8b1" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Feb 18 01:42:35 crc kubenswrapper[4791]: I0218 01:42:35.036413 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="45e1684d-3ec9-42ac-9e7b-71aa8e1d9eb1" containerName="registry-server" Feb 18 01:42:35 crc kubenswrapper[4791]: I0218 01:42:35.037179 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-pvgl7" Feb 18 01:42:35 crc kubenswrapper[4791]: I0218 01:42:35.048423 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-pvgl7"] Feb 18 01:42:35 crc kubenswrapper[4791]: I0218 01:42:35.048987 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Feb 18 01:42:35 crc kubenswrapper[4791]: I0218 01:42:35.049273 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Feb 18 01:42:35 crc kubenswrapper[4791]: I0218 01:42:35.049421 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-qjdk4" Feb 18 01:42:35 crc kubenswrapper[4791]: I0218 01:42:35.049618 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Feb 18 01:42:35 crc kubenswrapper[4791]: I0218 01:42:35.064834 4791 scope.go:117] "RemoveContainer" containerID="0c66914ba8ca3f5ddbabe2813fa17e382663af710ab17028ec8f40608d73bea9" Feb 18 01:42:35 crc kubenswrapper[4791]: E0218 01:42:35.065365 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:42:35 crc kubenswrapper[4791]: I0218 01:42:35.076089 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8bc0f1f9-4a48-410f-8911-d599f1fcdb84-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-pvgl7\" (UID: \"8bc0f1f9-4a48-410f-8911-d599f1fcdb84\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-pvgl7" Feb 18 01:42:35 crc kubenswrapper[4791]: I0218 01:42:35.076203 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z8nmq\" (UniqueName: \"kubernetes.io/projected/8bc0f1f9-4a48-410f-8911-d599f1fcdb84-kube-api-access-z8nmq\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-pvgl7\" (UID: \"8bc0f1f9-4a48-410f-8911-d599f1fcdb84\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-pvgl7" Feb 18 01:42:35 crc kubenswrapper[4791]: I0218 01:42:35.076302 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/8bc0f1f9-4a48-410f-8911-d599f1fcdb84-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-pvgl7\" (UID: \"8bc0f1f9-4a48-410f-8911-d599f1fcdb84\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-pvgl7" Feb 18 
01:42:35 crc kubenswrapper[4791]: I0218 01:42:35.179069 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/8bc0f1f9-4a48-410f-8911-d599f1fcdb84-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-pvgl7\" (UID: \"8bc0f1f9-4a48-410f-8911-d599f1fcdb84\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-pvgl7" Feb 18 01:42:35 crc kubenswrapper[4791]: I0218 01:42:35.179515 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8bc0f1f9-4a48-410f-8911-d599f1fcdb84-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-pvgl7\" (UID: \"8bc0f1f9-4a48-410f-8911-d599f1fcdb84\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-pvgl7" Feb 18 01:42:35 crc kubenswrapper[4791]: I0218 01:42:35.180933 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z8nmq\" (UniqueName: \"kubernetes.io/projected/8bc0f1f9-4a48-410f-8911-d599f1fcdb84-kube-api-access-z8nmq\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-pvgl7\" (UID: \"8bc0f1f9-4a48-410f-8911-d599f1fcdb84\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-pvgl7" Feb 18 01:42:35 crc kubenswrapper[4791]: I0218 01:42:35.186904 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8bc0f1f9-4a48-410f-8911-d599f1fcdb84-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-pvgl7\" (UID: \"8bc0f1f9-4a48-410f-8911-d599f1fcdb84\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-pvgl7" Feb 18 01:42:35 crc kubenswrapper[4791]: I0218 01:42:35.192613 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/8bc0f1f9-4a48-410f-8911-d599f1fcdb84-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-pvgl7\" (UID: \"8bc0f1f9-4a48-410f-8911-d599f1fcdb84\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-pvgl7" Feb 18 01:42:35 crc kubenswrapper[4791]: I0218 01:42:35.197676 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z8nmq\" (UniqueName: \"kubernetes.io/projected/8bc0f1f9-4a48-410f-8911-d599f1fcdb84-kube-api-access-z8nmq\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-pvgl7\" (UID: \"8bc0f1f9-4a48-410f-8911-d599f1fcdb84\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-pvgl7" Feb 18 01:42:35 crc kubenswrapper[4791]: I0218 01:42:35.375567 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-pvgl7" Feb 18 01:42:35 crc kubenswrapper[4791]: I0218 01:42:35.934808 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-pvgl7"] Feb 18 01:42:35 crc kubenswrapper[4791]: I0218 01:42:35.954376 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-pvgl7" event={"ID":"8bc0f1f9-4a48-410f-8911-d599f1fcdb84","Type":"ContainerStarted","Data":"31e4a3bb75452650bc7d18741ca32d6c888dbbf718772fe84e78f5310d2790af"} Feb 18 01:42:36 crc kubenswrapper[4791]: I0218 01:42:36.964755 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-pvgl7" event={"ID":"8bc0f1f9-4a48-410f-8911-d599f1fcdb84","Type":"ContainerStarted","Data":"fc6fa5c3ee2811b29765fc969e9ffd4b562c67de8be85e3775799407bbae4d6d"} Feb 18 01:42:36 crc kubenswrapper[4791]: I0218 01:42:36.989014 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-pvgl7" podStartSLOduration=1.450541804 podStartE2EDuration="1.988988702s" podCreationTimestamp="2026-02-18 01:42:35 +0000 UTC" firstStartedPulling="2026-02-18 01:42:35.936207716 +0000 UTC m=+4097.504220886" lastFinishedPulling="2026-02-18 01:42:36.474654594 +0000 UTC m=+4098.042667784" observedRunningTime="2026-02-18 01:42:36.983929098 +0000 UTC m=+4098.551942268" watchObservedRunningTime="2026-02-18 01:42:36.988988702 +0000 UTC m=+4098.557001912" Feb 18 01:42:44 crc kubenswrapper[4791]: E0218 01:42:44.064377 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:42:46 crc kubenswrapper[4791]: I0218 01:42:46.062569 4791 scope.go:117] "RemoveContainer" containerID="0c66914ba8ca3f5ddbabe2813fa17e382663af710ab17028ec8f40608d73bea9" Feb 18 01:42:46 crc kubenswrapper[4791]: E0218 01:42:46.063721 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:42:48 crc kubenswrapper[4791]: E0218 01:42:48.065006 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:42:57 crc kubenswrapper[4791]: E0218 01:42:57.065113 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:43:00 crc 
kubenswrapper[4791]: E0218 01:43:00.064861 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:43:01 crc kubenswrapper[4791]: I0218 01:43:01.062268 4791 scope.go:117] "RemoveContainer" containerID="0c66914ba8ca3f5ddbabe2813fa17e382663af710ab17028ec8f40608d73bea9" Feb 18 01:43:01 crc kubenswrapper[4791]: E0218 01:43:01.064061 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:43:12 crc kubenswrapper[4791]: I0218 01:43:12.061014 4791 scope.go:117] "RemoveContainer" containerID="0c66914ba8ca3f5ddbabe2813fa17e382663af710ab17028ec8f40608d73bea9" Feb 18 01:43:12 crc kubenswrapper[4791]: E0218 01:43:12.064122 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:43:12 crc kubenswrapper[4791]: E0218 01:43:12.150753 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 01:43:12 crc kubenswrapper[4791]: E0218 01:43:12.150807 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 01:43:12 crc kubenswrapper[4791]: E0218 01:43:12.150918 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndh699h566hcch79h59h595h58bhb6h7fh594h67fh546h668h56hc8h6chd5h94h599h587hddh64bh57bhc9h68fh7dh57bh65dh64fh68dh59dq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-np7gr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(4b9cec47-aeda-40f0-b83e-46f09ce65e95): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 18 01:43:12 crc kubenswrapper[4791]: E0218 01:43:12.153152 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:43:15 crc kubenswrapper[4791]: E0218 01:43:15.062719 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:43:25 crc kubenswrapper[4791]: E0218 01:43:25.065311 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:43:27 crc kubenswrapper[4791]: I0218 01:43:27.063047 4791 scope.go:117] "RemoveContainer" containerID="0c66914ba8ca3f5ddbabe2813fa17e382663af710ab17028ec8f40608d73bea9" Feb 18 01:43:27 crc kubenswrapper[4791]: I0218 01:43:27.524701 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerStarted","Data":"1ee7d20c8be862193aa0a7ef6ac2c6c603cad1fe6483f1d7f4e49125d0cb2732"} Feb 18 01:43:30 crc kubenswrapper[4791]: E0218 01:43:30.063120 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:43:40 crc kubenswrapper[4791]: E0218 01:43:40.064346 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:43:45 crc kubenswrapper[4791]: E0218 01:43:45.063301 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:43:53 crc kubenswrapper[4791]: E0218 01:43:53.063733 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:43:58 crc kubenswrapper[4791]: E0218 01:43:58.092168 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:44:04 crc kubenswrapper[4791]: E0218 01:44:04.065984 4791 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:44:13 crc kubenswrapper[4791]: E0218 01:44:13.064693 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:44:19 crc kubenswrapper[4791]: E0218 01:44:19.081949 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:44:27 crc kubenswrapper[4791]: E0218 01:44:27.065409 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:44:31 crc kubenswrapper[4791]: E0218 01:44:31.064661 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:44:40 crc kubenswrapper[4791]: E0218 01:44:40.064661 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:44:41 crc kubenswrapper[4791]: I0218 01:44:41.223001 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-fdg7z"] Feb 18 01:44:41 crc kubenswrapper[4791]: I0218 01:44:41.225851 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fdg7z" Feb 18 01:44:41 crc kubenswrapper[4791]: I0218 01:44:41.235810 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fdg7z"] Feb 18 01:44:41 crc kubenswrapper[4791]: I0218 01:44:41.341102 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7cb6767c-520e-4d77-a910-ce9696e2bfe7-utilities\") pod \"redhat-marketplace-fdg7z\" (UID: \"7cb6767c-520e-4d77-a910-ce9696e2bfe7\") " pod="openshift-marketplace/redhat-marketplace-fdg7z" Feb 18 01:44:41 crc kubenswrapper[4791]: I0218 01:44:41.341379 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7cb6767c-520e-4d77-a910-ce9696e2bfe7-catalog-content\") pod \"redhat-marketplace-fdg7z\" (UID: \"7cb6767c-520e-4d77-a910-ce9696e2bfe7\") " pod="openshift-marketplace/redhat-marketplace-fdg7z" Feb 18 01:44:41 crc kubenswrapper[4791]: I0218 01:44:41.341516 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4ddpj\" (UniqueName: \"kubernetes.io/projected/7cb6767c-520e-4d77-a910-ce9696e2bfe7-kube-api-access-4ddpj\") pod \"redhat-marketplace-fdg7z\" (UID: \"7cb6767c-520e-4d77-a910-ce9696e2bfe7\") " pod="openshift-marketplace/redhat-marketplace-fdg7z" Feb 18 01:44:41 crc kubenswrapper[4791]: I0218 01:44:41.443625 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7cb6767c-520e-4d77-a910-ce9696e2bfe7-utilities\") pod \"redhat-marketplace-fdg7z\" (UID: \"7cb6767c-520e-4d77-a910-ce9696e2bfe7\") " pod="openshift-marketplace/redhat-marketplace-fdg7z" Feb 18 01:44:41 crc kubenswrapper[4791]: I0218 01:44:41.443701 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7cb6767c-520e-4d77-a910-ce9696e2bfe7-catalog-content\") pod \"redhat-marketplace-fdg7z\" (UID: \"7cb6767c-520e-4d77-a910-ce9696e2bfe7\") " pod="openshift-marketplace/redhat-marketplace-fdg7z" Feb 18 01:44:41 crc kubenswrapper[4791]: I0218 01:44:41.443795 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4ddpj\" (UniqueName: \"kubernetes.io/projected/7cb6767c-520e-4d77-a910-ce9696e2bfe7-kube-api-access-4ddpj\") pod \"redhat-marketplace-fdg7z\" (UID: \"7cb6767c-520e-4d77-a910-ce9696e2bfe7\") " pod="openshift-marketplace/redhat-marketplace-fdg7z" Feb 18 01:44:41 crc kubenswrapper[4791]: I0218 01:44:41.444184 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7cb6767c-520e-4d77-a910-ce9696e2bfe7-utilities\") pod \"redhat-marketplace-fdg7z\" (UID: \"7cb6767c-520e-4d77-a910-ce9696e2bfe7\") " pod="openshift-marketplace/redhat-marketplace-fdg7z" Feb 18 01:44:41 crc kubenswrapper[4791]: I0218 01:44:41.444258 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7cb6767c-520e-4d77-a910-ce9696e2bfe7-catalog-content\") pod \"redhat-marketplace-fdg7z\" (UID: \"7cb6767c-520e-4d77-a910-ce9696e2bfe7\") " pod="openshift-marketplace/redhat-marketplace-fdg7z" Feb 18 01:44:41 crc kubenswrapper[4791]: I0218 01:44:41.465950 4791 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-4ddpj\" (UniqueName: \"kubernetes.io/projected/7cb6767c-520e-4d77-a910-ce9696e2bfe7-kube-api-access-4ddpj\") pod \"redhat-marketplace-fdg7z\" (UID: \"7cb6767c-520e-4d77-a910-ce9696e2bfe7\") " pod="openshift-marketplace/redhat-marketplace-fdg7z" Feb 18 01:44:41 crc kubenswrapper[4791]: I0218 01:44:41.550975 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fdg7z" Feb 18 01:44:42 crc kubenswrapper[4791]: W0218 01:44:42.097916 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7cb6767c_520e_4d77_a910_ce9696e2bfe7.slice/crio-cd1ba1b524caef6cc12ed528f335c35c1f03d9cdaf0ed2e4279da12fddda97e9 WatchSource:0}: Error finding container cd1ba1b524caef6cc12ed528f335c35c1f03d9cdaf0ed2e4279da12fddda97e9: Status 404 returned error can't find the container with id cd1ba1b524caef6cc12ed528f335c35c1f03d9cdaf0ed2e4279da12fddda97e9 Feb 18 01:44:42 crc kubenswrapper[4791]: I0218 01:44:42.100822 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fdg7z"] Feb 18 01:44:42 crc kubenswrapper[4791]: I0218 01:44:42.451890 4791 generic.go:334] "Generic (PLEG): container finished" podID="7cb6767c-520e-4d77-a910-ce9696e2bfe7" containerID="96ff84a37f21524f7b7516538c5d04547f87f7cf49b02d36d3cccb65fff2bc43" exitCode=0 Feb 18 01:44:42 crc kubenswrapper[4791]: I0218 01:44:42.452098 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fdg7z" event={"ID":"7cb6767c-520e-4d77-a910-ce9696e2bfe7","Type":"ContainerDied","Data":"96ff84a37f21524f7b7516538c5d04547f87f7cf49b02d36d3cccb65fff2bc43"} Feb 18 01:44:42 crc kubenswrapper[4791]: I0218 01:44:42.452297 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fdg7z" event={"ID":"7cb6767c-520e-4d77-a910-ce9696e2bfe7","Type":"ContainerStarted","Data":"cd1ba1b524caef6cc12ed528f335c35c1f03d9cdaf0ed2e4279da12fddda97e9"} Feb 18 01:44:43 crc kubenswrapper[4791]: I0218 01:44:43.462591 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fdg7z" event={"ID":"7cb6767c-520e-4d77-a910-ce9696e2bfe7","Type":"ContainerStarted","Data":"949d9836bb799a42125ddeeef509af4b82a2284a68e883f99cfc68d1b61c4a74"} Feb 18 01:44:44 crc kubenswrapper[4791]: I0218 01:44:44.473818 4791 generic.go:334] "Generic (PLEG): container finished" podID="7cb6767c-520e-4d77-a910-ce9696e2bfe7" containerID="949d9836bb799a42125ddeeef509af4b82a2284a68e883f99cfc68d1b61c4a74" exitCode=0 Feb 18 01:44:44 crc kubenswrapper[4791]: I0218 01:44:44.473906 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fdg7z" event={"ID":"7cb6767c-520e-4d77-a910-ce9696e2bfe7","Type":"ContainerDied","Data":"949d9836bb799a42125ddeeef509af4b82a2284a68e883f99cfc68d1b61c4a74"} Feb 18 01:44:45 crc kubenswrapper[4791]: E0218 01:44:45.064975 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:44:45 crc kubenswrapper[4791]: I0218 01:44:45.488353 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-fdg7z" event={"ID":"7cb6767c-520e-4d77-a910-ce9696e2bfe7","Type":"ContainerStarted","Data":"dcb15b73f57fc7bc65683f9bc970b93dddd6c1844911397331896f7bac554dc7"} Feb 18 01:44:45 crc kubenswrapper[4791]: I0218 01:44:45.510311 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-fdg7z" podStartSLOduration=2.109511279 podStartE2EDuration="4.510294022s" podCreationTimestamp="2026-02-18 01:44:41 +0000 UTC" firstStartedPulling="2026-02-18 01:44:42.455256361 +0000 UTC m=+4224.023269551" lastFinishedPulling="2026-02-18 01:44:44.856039104 +0000 UTC m=+4226.424052294" observedRunningTime="2026-02-18 01:44:45.505091433 +0000 UTC m=+4227.073104603" watchObservedRunningTime="2026-02-18 01:44:45.510294022 +0000 UTC m=+4227.078307182" Feb 18 01:44:51 crc kubenswrapper[4791]: I0218 01:44:51.551473 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-fdg7z" Feb 18 01:44:51 crc kubenswrapper[4791]: I0218 01:44:51.552120 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-fdg7z" Feb 18 01:44:51 crc kubenswrapper[4791]: I0218 01:44:51.852572 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-fdg7z" Feb 18 01:44:51 crc kubenswrapper[4791]: I0218 01:44:51.939944 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-fdg7z" Feb 18 01:44:52 crc kubenswrapper[4791]: E0218 01:44:52.063444 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:44:52 crc kubenswrapper[4791]: I0218 01:44:52.105091 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fdg7z"] Feb 18 01:44:53 crc kubenswrapper[4791]: I0218 01:44:53.599838 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-fdg7z" podUID="7cb6767c-520e-4d77-a910-ce9696e2bfe7" containerName="registry-server" containerID="cri-o://dcb15b73f57fc7bc65683f9bc970b93dddd6c1844911397331896f7bac554dc7" gracePeriod=2 Feb 18 01:44:54 crc kubenswrapper[4791]: I0218 01:44:54.614925 4791 generic.go:334] "Generic (PLEG): container finished" podID="7cb6767c-520e-4d77-a910-ce9696e2bfe7" containerID="dcb15b73f57fc7bc65683f9bc970b93dddd6c1844911397331896f7bac554dc7" exitCode=0 Feb 18 01:44:54 crc kubenswrapper[4791]: I0218 01:44:54.615021 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fdg7z" event={"ID":"7cb6767c-520e-4d77-a910-ce9696e2bfe7","Type":"ContainerDied","Data":"dcb15b73f57fc7bc65683f9bc970b93dddd6c1844911397331896f7bac554dc7"} Feb 18 01:44:54 crc kubenswrapper[4791]: I0218 01:44:54.737089 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fdg7z" Feb 18 01:44:54 crc kubenswrapper[4791]: I0218 01:44:54.796294 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4ddpj\" (UniqueName: \"kubernetes.io/projected/7cb6767c-520e-4d77-a910-ce9696e2bfe7-kube-api-access-4ddpj\") pod \"7cb6767c-520e-4d77-a910-ce9696e2bfe7\" (UID: \"7cb6767c-520e-4d77-a910-ce9696e2bfe7\") " Feb 18 01:44:54 crc kubenswrapper[4791]: I0218 01:44:54.796490 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7cb6767c-520e-4d77-a910-ce9696e2bfe7-catalog-content\") pod \"7cb6767c-520e-4d77-a910-ce9696e2bfe7\" (UID: \"7cb6767c-520e-4d77-a910-ce9696e2bfe7\") " Feb 18 01:44:54 crc kubenswrapper[4791]: I0218 01:44:54.796550 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7cb6767c-520e-4d77-a910-ce9696e2bfe7-utilities\") pod \"7cb6767c-520e-4d77-a910-ce9696e2bfe7\" (UID: \"7cb6767c-520e-4d77-a910-ce9696e2bfe7\") " Feb 18 01:44:54 crc kubenswrapper[4791]: I0218 01:44:54.797630 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7cb6767c-520e-4d77-a910-ce9696e2bfe7-utilities" (OuterVolumeSpecName: "utilities") pod "7cb6767c-520e-4d77-a910-ce9696e2bfe7" (UID: "7cb6767c-520e-4d77-a910-ce9696e2bfe7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:44:54 crc kubenswrapper[4791]: I0218 01:44:54.802982 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7cb6767c-520e-4d77-a910-ce9696e2bfe7-kube-api-access-4ddpj" (OuterVolumeSpecName: "kube-api-access-4ddpj") pod "7cb6767c-520e-4d77-a910-ce9696e2bfe7" (UID: "7cb6767c-520e-4d77-a910-ce9696e2bfe7"). InnerVolumeSpecName "kube-api-access-4ddpj". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:44:54 crc kubenswrapper[4791]: I0218 01:44:54.827020 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7cb6767c-520e-4d77-a910-ce9696e2bfe7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7cb6767c-520e-4d77-a910-ce9696e2bfe7" (UID: "7cb6767c-520e-4d77-a910-ce9696e2bfe7"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:44:54 crc kubenswrapper[4791]: I0218 01:44:54.899442 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4ddpj\" (UniqueName: \"kubernetes.io/projected/7cb6767c-520e-4d77-a910-ce9696e2bfe7-kube-api-access-4ddpj\") on node \"crc\" DevicePath \"\"" Feb 18 01:44:54 crc kubenswrapper[4791]: I0218 01:44:54.899667 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7cb6767c-520e-4d77-a910-ce9696e2bfe7-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 01:44:54 crc kubenswrapper[4791]: I0218 01:44:54.899726 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7cb6767c-520e-4d77-a910-ce9696e2bfe7-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 01:44:55 crc kubenswrapper[4791]: I0218 01:44:55.629872 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fdg7z" event={"ID":"7cb6767c-520e-4d77-a910-ce9696e2bfe7","Type":"ContainerDied","Data":"cd1ba1b524caef6cc12ed528f335c35c1f03d9cdaf0ed2e4279da12fddda97e9"} Feb 18 01:44:55 crc kubenswrapper[4791]: I0218 01:44:55.629960 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fdg7z" Feb 18 01:44:55 crc kubenswrapper[4791]: I0218 01:44:55.630406 4791 scope.go:117] "RemoveContainer" containerID="dcb15b73f57fc7bc65683f9bc970b93dddd6c1844911397331896f7bac554dc7" Feb 18 01:44:55 crc kubenswrapper[4791]: I0218 01:44:55.658687 4791 scope.go:117] "RemoveContainer" containerID="949d9836bb799a42125ddeeef509af4b82a2284a68e883f99cfc68d1b61c4a74" Feb 18 01:44:55 crc kubenswrapper[4791]: I0218 01:44:55.682056 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fdg7z"] Feb 18 01:44:55 crc kubenswrapper[4791]: I0218 01:44:55.692785 4791 scope.go:117] "RemoveContainer" containerID="96ff84a37f21524f7b7516538c5d04547f87f7cf49b02d36d3cccb65fff2bc43" Feb 18 01:44:55 crc kubenswrapper[4791]: I0218 01:44:55.708450 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-fdg7z"] Feb 18 01:44:56 crc kubenswrapper[4791]: E0218 01:44:56.063643 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:44:57 crc kubenswrapper[4791]: I0218 01:44:57.073317 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7cb6767c-520e-4d77-a910-ce9696e2bfe7" path="/var/lib/kubelet/pods/7cb6767c-520e-4d77-a910-ce9696e2bfe7/volumes" Feb 18 01:45:00 crc kubenswrapper[4791]: I0218 01:45:00.184710 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29522985-gmd2c"] Feb 18 01:45:00 crc kubenswrapper[4791]: E0218 01:45:00.186028 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7cb6767c-520e-4d77-a910-ce9696e2bfe7" containerName="registry-server" Feb 18 01:45:00 crc kubenswrapper[4791]: I0218 01:45:00.186052 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="7cb6767c-520e-4d77-a910-ce9696e2bfe7" containerName="registry-server" Feb 18 01:45:00 crc kubenswrapper[4791]: 
E0218 01:45:00.186087 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7cb6767c-520e-4d77-a910-ce9696e2bfe7" containerName="extract-utilities" Feb 18 01:45:00 crc kubenswrapper[4791]: I0218 01:45:00.186098 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="7cb6767c-520e-4d77-a910-ce9696e2bfe7" containerName="extract-utilities" Feb 18 01:45:00 crc kubenswrapper[4791]: E0218 01:45:00.186197 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7cb6767c-520e-4d77-a910-ce9696e2bfe7" containerName="extract-content" Feb 18 01:45:00 crc kubenswrapper[4791]: I0218 01:45:00.186213 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="7cb6767c-520e-4d77-a910-ce9696e2bfe7" containerName="extract-content" Feb 18 01:45:00 crc kubenswrapper[4791]: I0218 01:45:00.186610 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="7cb6767c-520e-4d77-a910-ce9696e2bfe7" containerName="registry-server" Feb 18 01:45:00 crc kubenswrapper[4791]: I0218 01:45:00.187919 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29522985-gmd2c" Feb 18 01:45:00 crc kubenswrapper[4791]: I0218 01:45:00.191864 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Feb 18 01:45:00 crc kubenswrapper[4791]: I0218 01:45:00.192732 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Feb 18 01:45:00 crc kubenswrapper[4791]: I0218 01:45:00.205279 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29522985-gmd2c"] Feb 18 01:45:00 crc kubenswrapper[4791]: I0218 01:45:00.242804 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e65fc914-0298-4557-95e4-df4d1bd6bb57-config-volume\") pod \"collect-profiles-29522985-gmd2c\" (UID: \"e65fc914-0298-4557-95e4-df4d1bd6bb57\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522985-gmd2c" Feb 18 01:45:00 crc kubenswrapper[4791]: I0218 01:45:00.243413 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e65fc914-0298-4557-95e4-df4d1bd6bb57-secret-volume\") pod \"collect-profiles-29522985-gmd2c\" (UID: \"e65fc914-0298-4557-95e4-df4d1bd6bb57\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522985-gmd2c" Feb 18 01:45:00 crc kubenswrapper[4791]: I0218 01:45:00.243847 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gnr9x\" (UniqueName: \"kubernetes.io/projected/e65fc914-0298-4557-95e4-df4d1bd6bb57-kube-api-access-gnr9x\") pod \"collect-profiles-29522985-gmd2c\" (UID: \"e65fc914-0298-4557-95e4-df4d1bd6bb57\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522985-gmd2c" Feb 18 01:45:00 crc kubenswrapper[4791]: I0218 01:45:00.346799 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e65fc914-0298-4557-95e4-df4d1bd6bb57-secret-volume\") pod \"collect-profiles-29522985-gmd2c\" (UID: \"e65fc914-0298-4557-95e4-df4d1bd6bb57\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522985-gmd2c" Feb 18 01:45:00 crc 
kubenswrapper[4791]: I0218 01:45:00.346897 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gnr9x\" (UniqueName: \"kubernetes.io/projected/e65fc914-0298-4557-95e4-df4d1bd6bb57-kube-api-access-gnr9x\") pod \"collect-profiles-29522985-gmd2c\" (UID: \"e65fc914-0298-4557-95e4-df4d1bd6bb57\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522985-gmd2c" Feb 18 01:45:00 crc kubenswrapper[4791]: I0218 01:45:00.346985 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e65fc914-0298-4557-95e4-df4d1bd6bb57-config-volume\") pod \"collect-profiles-29522985-gmd2c\" (UID: \"e65fc914-0298-4557-95e4-df4d1bd6bb57\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522985-gmd2c" Feb 18 01:45:00 crc kubenswrapper[4791]: I0218 01:45:00.347856 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e65fc914-0298-4557-95e4-df4d1bd6bb57-config-volume\") pod \"collect-profiles-29522985-gmd2c\" (UID: \"e65fc914-0298-4557-95e4-df4d1bd6bb57\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522985-gmd2c" Feb 18 01:45:00 crc kubenswrapper[4791]: I0218 01:45:00.353801 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e65fc914-0298-4557-95e4-df4d1bd6bb57-secret-volume\") pod \"collect-profiles-29522985-gmd2c\" (UID: \"e65fc914-0298-4557-95e4-df4d1bd6bb57\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522985-gmd2c" Feb 18 01:45:00 crc kubenswrapper[4791]: I0218 01:45:00.363946 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gnr9x\" (UniqueName: \"kubernetes.io/projected/e65fc914-0298-4557-95e4-df4d1bd6bb57-kube-api-access-gnr9x\") pod \"collect-profiles-29522985-gmd2c\" (UID: \"e65fc914-0298-4557-95e4-df4d1bd6bb57\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29522985-gmd2c" Feb 18 01:45:00 crc kubenswrapper[4791]: I0218 01:45:00.526891 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29522985-gmd2c" Feb 18 01:45:01 crc kubenswrapper[4791]: W0218 01:45:01.005500 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode65fc914_0298_4557_95e4_df4d1bd6bb57.slice/crio-9124d6cf20f0976c67e5f4f84653bc103c8662a05eaefd004d18072aa195c4b3 WatchSource:0}: Error finding container 9124d6cf20f0976c67e5f4f84653bc103c8662a05eaefd004d18072aa195c4b3: Status 404 returned error can't find the container with id 9124d6cf20f0976c67e5f4f84653bc103c8662a05eaefd004d18072aa195c4b3 Feb 18 01:45:01 crc kubenswrapper[4791]: I0218 01:45:01.013375 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29522985-gmd2c"] Feb 18 01:45:01 crc kubenswrapper[4791]: I0218 01:45:01.695791 4791 generic.go:334] "Generic (PLEG): container finished" podID="e65fc914-0298-4557-95e4-df4d1bd6bb57" containerID="b4bd0cd7f5e9da03100f0c30c1a46dee02b075aa75076636bafa2e710fa66924" exitCode=0 Feb 18 01:45:01 crc kubenswrapper[4791]: I0218 01:45:01.695845 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29522985-gmd2c" event={"ID":"e65fc914-0298-4557-95e4-df4d1bd6bb57","Type":"ContainerDied","Data":"b4bd0cd7f5e9da03100f0c30c1a46dee02b075aa75076636bafa2e710fa66924"} Feb 18 01:45:01 crc kubenswrapper[4791]: I0218 01:45:01.696094 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29522985-gmd2c" event={"ID":"e65fc914-0298-4557-95e4-df4d1bd6bb57","Type":"ContainerStarted","Data":"9124d6cf20f0976c67e5f4f84653bc103c8662a05eaefd004d18072aa195c4b3"} Feb 18 01:45:03 crc kubenswrapper[4791]: I0218 01:45:03.223239 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29522985-gmd2c" Feb 18 01:45:03 crc kubenswrapper[4791]: I0218 01:45:03.328656 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gnr9x\" (UniqueName: \"kubernetes.io/projected/e65fc914-0298-4557-95e4-df4d1bd6bb57-kube-api-access-gnr9x\") pod \"e65fc914-0298-4557-95e4-df4d1bd6bb57\" (UID: \"e65fc914-0298-4557-95e4-df4d1bd6bb57\") " Feb 18 01:45:03 crc kubenswrapper[4791]: I0218 01:45:03.329363 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e65fc914-0298-4557-95e4-df4d1bd6bb57-secret-volume\") pod \"e65fc914-0298-4557-95e4-df4d1bd6bb57\" (UID: \"e65fc914-0298-4557-95e4-df4d1bd6bb57\") " Feb 18 01:45:03 crc kubenswrapper[4791]: I0218 01:45:03.329431 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e65fc914-0298-4557-95e4-df4d1bd6bb57-config-volume\") pod \"e65fc914-0298-4557-95e4-df4d1bd6bb57\" (UID: \"e65fc914-0298-4557-95e4-df4d1bd6bb57\") " Feb 18 01:45:03 crc kubenswrapper[4791]: I0218 01:45:03.330137 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e65fc914-0298-4557-95e4-df4d1bd6bb57-config-volume" (OuterVolumeSpecName: "config-volume") pod "e65fc914-0298-4557-95e4-df4d1bd6bb57" (UID: "e65fc914-0298-4557-95e4-df4d1bd6bb57"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 01:45:03 crc kubenswrapper[4791]: I0218 01:45:03.333985 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e65fc914-0298-4557-95e4-df4d1bd6bb57-kube-api-access-gnr9x" (OuterVolumeSpecName: "kube-api-access-gnr9x") pod "e65fc914-0298-4557-95e4-df4d1bd6bb57" (UID: "e65fc914-0298-4557-95e4-df4d1bd6bb57"). InnerVolumeSpecName "kube-api-access-gnr9x". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:45:03 crc kubenswrapper[4791]: I0218 01:45:03.335459 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e65fc914-0298-4557-95e4-df4d1bd6bb57-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "e65fc914-0298-4557-95e4-df4d1bd6bb57" (UID: "e65fc914-0298-4557-95e4-df4d1bd6bb57"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:45:03 crc kubenswrapper[4791]: I0218 01:45:03.432194 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gnr9x\" (UniqueName: \"kubernetes.io/projected/e65fc914-0298-4557-95e4-df4d1bd6bb57-kube-api-access-gnr9x\") on node \"crc\" DevicePath \"\"" Feb 18 01:45:03 crc kubenswrapper[4791]: I0218 01:45:03.432229 4791 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e65fc914-0298-4557-95e4-df4d1bd6bb57-secret-volume\") on node \"crc\" DevicePath \"\"" Feb 18 01:45:03 crc kubenswrapper[4791]: I0218 01:45:03.432238 4791 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e65fc914-0298-4557-95e4-df4d1bd6bb57-config-volume\") on node \"crc\" DevicePath \"\"" Feb 18 01:45:03 crc kubenswrapper[4791]: I0218 01:45:03.715246 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29522985-gmd2c" event={"ID":"e65fc914-0298-4557-95e4-df4d1bd6bb57","Type":"ContainerDied","Data":"9124d6cf20f0976c67e5f4f84653bc103c8662a05eaefd004d18072aa195c4b3"} Feb 18 01:45:03 crc kubenswrapper[4791]: I0218 01:45:03.715304 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9124d6cf20f0976c67e5f4f84653bc103c8662a05eaefd004d18072aa195c4b3" Feb 18 01:45:03 crc kubenswrapper[4791]: I0218 01:45:03.715313 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29522985-gmd2c" Feb 18 01:45:04 crc kubenswrapper[4791]: I0218 01:45:04.330634 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29522940-p48tt"] Feb 18 01:45:04 crc kubenswrapper[4791]: I0218 01:45:04.345320 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29522940-p48tt"] Feb 18 01:45:05 crc kubenswrapper[4791]: I0218 01:45:05.079289 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31dc1b9b-9b55-465e-ac36-794af7e2e0bd" path="/var/lib/kubelet/pods/31dc1b9b-9b55-465e-ac36-794af7e2e0bd/volumes" Feb 18 01:45:07 crc kubenswrapper[4791]: E0218 01:45:07.066863 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:45:08 crc kubenswrapper[4791]: E0218 01:45:08.062742 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:45:19 crc kubenswrapper[4791]: E0218 01:45:19.095340 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:45:21 crc kubenswrapper[4791]: E0218 01:45:21.066917 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:45:31 crc kubenswrapper[4791]: E0218 01:45:31.067525 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:45:34 crc kubenswrapper[4791]: E0218 01:45:34.069148 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:45:34 crc kubenswrapper[4791]: I0218 01:45:34.479693 4791 scope.go:117] "RemoveContainer" containerID="ca408d420d1fa739f58904fd48426a5143f4c5378c7160bbba28c72cfe32fe57" Feb 18 01:45:43 crc kubenswrapper[4791]: E0218 01:45:43.065239 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off 
pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:45:45 crc kubenswrapper[4791]: E0218 01:45:45.066594 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:45:55 crc kubenswrapper[4791]: E0218 01:45:55.063563 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:45:56 crc kubenswrapper[4791]: I0218 01:45:56.799811 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 01:45:56 crc kubenswrapper[4791]: I0218 01:45:56.800113 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 01:46:00 crc kubenswrapper[4791]: E0218 01:46:00.064406 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:46:07 crc kubenswrapper[4791]: E0218 01:46:07.065940 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:46:13 crc kubenswrapper[4791]: E0218 01:46:13.068610 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:46:22 crc kubenswrapper[4791]: E0218 01:46:22.063399 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:46:26 crc kubenswrapper[4791]: I0218 01:46:26.799744 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 01:46:26 crc kubenswrapper[4791]: I0218 01:46:26.800461 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 01:46:27 crc kubenswrapper[4791]: E0218 01:46:27.064498 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:46:37 crc kubenswrapper[4791]: E0218 01:46:37.065016 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:46:40 crc kubenswrapper[4791]: E0218 01:46:40.063034 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:46:52 crc kubenswrapper[4791]: E0218 01:46:52.063682 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:46:52 crc kubenswrapper[4791]: E0218 01:46:52.064018 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:46:56 crc kubenswrapper[4791]: I0218 01:46:56.800461 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 01:46:56 crc kubenswrapper[4791]: I0218 01:46:56.801090 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 01:46:56 crc kubenswrapper[4791]: I0218 01:46:56.801143 4791 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" Feb 18 
01:46:56 crc kubenswrapper[4791]: I0218 01:46:56.801814 4791 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1ee7d20c8be862193aa0a7ef6ac2c6c603cad1fe6483f1d7f4e49125d0cb2732"} pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 18 01:46:56 crc kubenswrapper[4791]: I0218 01:46:56.801888 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" containerID="cri-o://1ee7d20c8be862193aa0a7ef6ac2c6c603cad1fe6483f1d7f4e49125d0cb2732" gracePeriod=600 Feb 18 01:46:57 crc kubenswrapper[4791]: I0218 01:46:57.105313 4791 generic.go:334] "Generic (PLEG): container finished" podID="0b31a333-8f95-459c-8135-e91e557c4c85" containerID="1ee7d20c8be862193aa0a7ef6ac2c6c603cad1fe6483f1d7f4e49125d0cb2732" exitCode=0 Feb 18 01:46:57 crc kubenswrapper[4791]: I0218 01:46:57.105356 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerDied","Data":"1ee7d20c8be862193aa0a7ef6ac2c6c603cad1fe6483f1d7f4e49125d0cb2732"} Feb 18 01:46:57 crc kubenswrapper[4791]: I0218 01:46:57.105387 4791 scope.go:117] "RemoveContainer" containerID="0c66914ba8ca3f5ddbabe2813fa17e382663af710ab17028ec8f40608d73bea9" Feb 18 01:46:58 crc kubenswrapper[4791]: I0218 01:46:58.118361 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerStarted","Data":"013bfa5afe6249afde24459696ad10cac9297eef278d7d81abf4388dec91ac8a"} Feb 18 01:47:05 crc kubenswrapper[4791]: E0218 01:47:05.066524 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:47:06 crc kubenswrapper[4791]: E0218 01:47:06.064227 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:47:18 crc kubenswrapper[4791]: E0218 01:47:18.063486 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:47:21 crc kubenswrapper[4791]: E0218 01:47:21.066835 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:47:32 
crc kubenswrapper[4791]: I0218 01:47:32.064794 4791 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 18 01:47:32 crc kubenswrapper[4791]: E0218 01:47:32.206485 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 01:47:32 crc kubenswrapper[4791]: E0218 01:47:32.206551 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 01:47:32 crc kubenswrapper[4791]: E0218 01:47:32.206689 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lgn7d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-scphk_openstack(68e5e8d6-5771-4045-858a-4a39b2db99f9): ErrImagePull: initializing source 
docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 18 01:47:32 crc kubenswrapper[4791]: E0218 01:47:32.208406 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:47:36 crc kubenswrapper[4791]: E0218 01:47:36.063000 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:47:43 crc kubenswrapper[4791]: E0218 01:47:43.077744 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:47:47 crc kubenswrapper[4791]: E0218 01:47:47.306666 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:47:58 crc kubenswrapper[4791]: E0218 01:47:58.063812 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:48:00 crc kubenswrapper[4791]: E0218 01:48:00.065978 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:48:11 crc kubenswrapper[4791]: E0218 01:48:11.064028 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:48:13 crc kubenswrapper[4791]: E0218 01:48:13.362025 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source 
docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 01:48:13 crc kubenswrapper[4791]: E0218 01:48:13.362554 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 01:48:13 crc kubenswrapper[4791]: E0218 01:48:13.362779 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndh699h566hcch79h59h595h58bhb6h7fh594h67fh546h668h56hc8h6chd5h94h599h587hddh64bh57bhc9h68fh7dh57bh65dh64fh68dh59dq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-np7gr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(4b9cec47-aeda-40f0-b83e-46f09ce65e95): ErrImagePull: initializing source 
docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 18 01:48:13 crc kubenswrapper[4791]: E0218 01:48:13.363999 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:48:26 crc kubenswrapper[4791]: E0218 01:48:26.063789 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:48:28 crc kubenswrapper[4791]: E0218 01:48:28.064574 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:48:38 crc kubenswrapper[4791]: E0218 01:48:38.063273 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:48:39 crc kubenswrapper[4791]: E0218 01:48:39.072148 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:48:51 crc kubenswrapper[4791]: E0218 01:48:51.063502 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:48:53 crc kubenswrapper[4791]: E0218 01:48:53.062833 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:49:02 crc kubenswrapper[4791]: E0218 01:49:02.063532 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off 
pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:49:02 crc kubenswrapper[4791]: I0218 01:49:02.486754 4791 generic.go:334] "Generic (PLEG): container finished" podID="8bc0f1f9-4a48-410f-8911-d599f1fcdb84" containerID="fc6fa5c3ee2811b29765fc969e9ffd4b562c67de8be85e3775799407bbae4d6d" exitCode=2 Feb 18 01:49:02 crc kubenswrapper[4791]: I0218 01:49:02.486796 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-pvgl7" event={"ID":"8bc0f1f9-4a48-410f-8911-d599f1fcdb84","Type":"ContainerDied","Data":"fc6fa5c3ee2811b29765fc969e9ffd4b562c67de8be85e3775799407bbae4d6d"} Feb 18 01:49:04 crc kubenswrapper[4791]: E0218 01:49:04.062329 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:49:04 crc kubenswrapper[4791]: I0218 01:49:04.510373 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-pvgl7" event={"ID":"8bc0f1f9-4a48-410f-8911-d599f1fcdb84","Type":"ContainerDied","Data":"31e4a3bb75452650bc7d18741ca32d6c888dbbf718772fe84e78f5310d2790af"} Feb 18 01:49:04 crc kubenswrapper[4791]: I0218 01:49:04.510413 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="31e4a3bb75452650bc7d18741ca32d6c888dbbf718772fe84e78f5310d2790af" Feb 18 01:49:04 crc kubenswrapper[4791]: I0218 01:49:04.858931 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-pvgl7" Feb 18 01:49:05 crc kubenswrapper[4791]: I0218 01:49:05.008906 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8bc0f1f9-4a48-410f-8911-d599f1fcdb84-inventory\") pod \"8bc0f1f9-4a48-410f-8911-d599f1fcdb84\" (UID: \"8bc0f1f9-4a48-410f-8911-d599f1fcdb84\") " Feb 18 01:49:05 crc kubenswrapper[4791]: I0218 01:49:05.009072 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/8bc0f1f9-4a48-410f-8911-d599f1fcdb84-ssh-key-openstack-edpm-ipam\") pod \"8bc0f1f9-4a48-410f-8911-d599f1fcdb84\" (UID: \"8bc0f1f9-4a48-410f-8911-d599f1fcdb84\") " Feb 18 01:49:05 crc kubenswrapper[4791]: I0218 01:49:05.009400 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z8nmq\" (UniqueName: \"kubernetes.io/projected/8bc0f1f9-4a48-410f-8911-d599f1fcdb84-kube-api-access-z8nmq\") pod \"8bc0f1f9-4a48-410f-8911-d599f1fcdb84\" (UID: \"8bc0f1f9-4a48-410f-8911-d599f1fcdb84\") " Feb 18 01:49:05 crc kubenswrapper[4791]: I0218 01:49:05.021572 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8bc0f1f9-4a48-410f-8911-d599f1fcdb84-kube-api-access-z8nmq" (OuterVolumeSpecName: "kube-api-access-z8nmq") pod "8bc0f1f9-4a48-410f-8911-d599f1fcdb84" (UID: "8bc0f1f9-4a48-410f-8911-d599f1fcdb84"). InnerVolumeSpecName "kube-api-access-z8nmq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:49:05 crc kubenswrapper[4791]: I0218 01:49:05.042892 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bc0f1f9-4a48-410f-8911-d599f1fcdb84-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "8bc0f1f9-4a48-410f-8911-d599f1fcdb84" (UID: "8bc0f1f9-4a48-410f-8911-d599f1fcdb84"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:49:05 crc kubenswrapper[4791]: I0218 01:49:05.054855 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bc0f1f9-4a48-410f-8911-d599f1fcdb84-inventory" (OuterVolumeSpecName: "inventory") pod "8bc0f1f9-4a48-410f-8911-d599f1fcdb84" (UID: "8bc0f1f9-4a48-410f-8911-d599f1fcdb84"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 01:49:05 crc kubenswrapper[4791]: I0218 01:49:05.112979 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z8nmq\" (UniqueName: \"kubernetes.io/projected/8bc0f1f9-4a48-410f-8911-d599f1fcdb84-kube-api-access-z8nmq\") on node \"crc\" DevicePath \"\"" Feb 18 01:49:05 crc kubenswrapper[4791]: I0218 01:49:05.113250 4791 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8bc0f1f9-4a48-410f-8911-d599f1fcdb84-inventory\") on node \"crc\" DevicePath \"\"" Feb 18 01:49:05 crc kubenswrapper[4791]: I0218 01:49:05.113260 4791 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/8bc0f1f9-4a48-410f-8911-d599f1fcdb84-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Feb 18 01:49:05 crc kubenswrapper[4791]: I0218 01:49:05.519666 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-pvgl7" Feb 18 01:49:17 crc kubenswrapper[4791]: E0218 01:49:17.065079 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:49:17 crc kubenswrapper[4791]: E0218 01:49:17.065247 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:49:26 crc kubenswrapper[4791]: I0218 01:49:26.799942 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 01:49:26 crc kubenswrapper[4791]: I0218 01:49:26.800445 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 01:49:29 crc kubenswrapper[4791]: E0218 01:49:29.072218 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:49:30 crc kubenswrapper[4791]: E0218 01:49:30.063670 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:49:41 crc kubenswrapper[4791]: E0218 01:49:41.064536 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:49:42 crc kubenswrapper[4791]: E0218 01:49:42.064536 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:49:55 crc kubenswrapper[4791]: E0218 01:49:55.064698 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:49:55 crc kubenswrapper[4791]: E0218 01:49:55.064769 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:49:56 crc kubenswrapper[4791]: I0218 01:49:56.800381 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 01:49:56 crc kubenswrapper[4791]: I0218 01:49:56.800976 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 01:49:58 crc kubenswrapper[4791]: I0218 01:49:58.016672 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-mstx5"] Feb 18 01:49:58 crc kubenswrapper[4791]: E0218 01:49:58.017273 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e65fc914-0298-4557-95e4-df4d1bd6bb57" containerName="collect-profiles" Feb 18 01:49:58 crc kubenswrapper[4791]: I0218 01:49:58.017286 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="e65fc914-0298-4557-95e4-df4d1bd6bb57" containerName="collect-profiles" Feb 18 01:49:58 crc kubenswrapper[4791]: E0218 01:49:58.017319 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bc0f1f9-4a48-410f-8911-d599f1fcdb84" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Feb 18 01:49:58 crc kubenswrapper[4791]: I0218 01:49:58.017328 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bc0f1f9-4a48-410f-8911-d599f1fcdb84" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Feb 18 01:49:58 crc kubenswrapper[4791]: I0218 01:49:58.017564 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="e65fc914-0298-4557-95e4-df4d1bd6bb57" containerName="collect-profiles" Feb 18 01:49:58 crc kubenswrapper[4791]: I0218 01:49:58.017583 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="8bc0f1f9-4a48-410f-8911-d599f1fcdb84" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Feb 18 01:49:58 crc kubenswrapper[4791]: I0218 01:49:58.019865 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-mstx5" Feb 18 01:49:58 crc kubenswrapper[4791]: I0218 01:49:58.033096 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-mstx5"] Feb 18 01:49:58 crc kubenswrapper[4791]: I0218 01:49:58.135197 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2cd90cb1-9274-48f7-9a17-6a6bd525e7e3-catalog-content\") pod \"certified-operators-mstx5\" (UID: \"2cd90cb1-9274-48f7-9a17-6a6bd525e7e3\") " pod="openshift-marketplace/certified-operators-mstx5" Feb 18 01:49:58 crc kubenswrapper[4791]: I0218 01:49:58.135521 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2cd90cb1-9274-48f7-9a17-6a6bd525e7e3-utilities\") pod \"certified-operators-mstx5\" (UID: \"2cd90cb1-9274-48f7-9a17-6a6bd525e7e3\") " pod="openshift-marketplace/certified-operators-mstx5" Feb 18 01:49:58 crc kubenswrapper[4791]: I0218 01:49:58.135706 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zhrz7\" (UniqueName: \"kubernetes.io/projected/2cd90cb1-9274-48f7-9a17-6a6bd525e7e3-kube-api-access-zhrz7\") pod \"certified-operators-mstx5\" (UID: \"2cd90cb1-9274-48f7-9a17-6a6bd525e7e3\") " pod="openshift-marketplace/certified-operators-mstx5" Feb 18 01:49:58 crc kubenswrapper[4791]: I0218 01:49:58.237588 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zhrz7\" (UniqueName: \"kubernetes.io/projected/2cd90cb1-9274-48f7-9a17-6a6bd525e7e3-kube-api-access-zhrz7\") pod \"certified-operators-mstx5\" (UID: \"2cd90cb1-9274-48f7-9a17-6a6bd525e7e3\") " pod="openshift-marketplace/certified-operators-mstx5" Feb 18 01:49:58 crc kubenswrapper[4791]: I0218 01:49:58.237820 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2cd90cb1-9274-48f7-9a17-6a6bd525e7e3-catalog-content\") pod \"certified-operators-mstx5\" (UID: \"2cd90cb1-9274-48f7-9a17-6a6bd525e7e3\") " pod="openshift-marketplace/certified-operators-mstx5" Feb 18 01:49:58 crc kubenswrapper[4791]: I0218 01:49:58.237852 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2cd90cb1-9274-48f7-9a17-6a6bd525e7e3-utilities\") pod \"certified-operators-mstx5\" (UID: \"2cd90cb1-9274-48f7-9a17-6a6bd525e7e3\") " pod="openshift-marketplace/certified-operators-mstx5" Feb 18 01:49:58 crc kubenswrapper[4791]: I0218 01:49:58.238390 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2cd90cb1-9274-48f7-9a17-6a6bd525e7e3-catalog-content\") pod \"certified-operators-mstx5\" (UID: \"2cd90cb1-9274-48f7-9a17-6a6bd525e7e3\") " pod="openshift-marketplace/certified-operators-mstx5" Feb 18 01:49:58 crc kubenswrapper[4791]: I0218 01:49:58.238422 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2cd90cb1-9274-48f7-9a17-6a6bd525e7e3-utilities\") pod \"certified-operators-mstx5\" (UID: \"2cd90cb1-9274-48f7-9a17-6a6bd525e7e3\") " pod="openshift-marketplace/certified-operators-mstx5" Feb 18 01:49:58 crc kubenswrapper[4791]: I0218 01:49:58.275867 4791 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-zhrz7\" (UniqueName: \"kubernetes.io/projected/2cd90cb1-9274-48f7-9a17-6a6bd525e7e3-kube-api-access-zhrz7\") pod \"certified-operators-mstx5\" (UID: \"2cd90cb1-9274-48f7-9a17-6a6bd525e7e3\") " pod="openshift-marketplace/certified-operators-mstx5" Feb 18 01:49:58 crc kubenswrapper[4791]: I0218 01:49:58.351111 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mstx5" Feb 18 01:49:58 crc kubenswrapper[4791]: I0218 01:49:58.956421 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-mstx5"] Feb 18 01:49:59 crc kubenswrapper[4791]: I0218 01:49:59.083547 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mstx5" event={"ID":"2cd90cb1-9274-48f7-9a17-6a6bd525e7e3","Type":"ContainerStarted","Data":"d48de60e7d55393534a2f4ccdbfd69c9bea274f55815657fa5fdcdc46df9b8b7"} Feb 18 01:50:00 crc kubenswrapper[4791]: I0218 01:50:00.097740 4791 generic.go:334] "Generic (PLEG): container finished" podID="2cd90cb1-9274-48f7-9a17-6a6bd525e7e3" containerID="9ab89a41e370f17709c0bed37962d24b8bfda18c21927668c9a286ca23ef84d8" exitCode=0 Feb 18 01:50:00 crc kubenswrapper[4791]: I0218 01:50:00.097859 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mstx5" event={"ID":"2cd90cb1-9274-48f7-9a17-6a6bd525e7e3","Type":"ContainerDied","Data":"9ab89a41e370f17709c0bed37962d24b8bfda18c21927668c9a286ca23ef84d8"} Feb 18 01:50:01 crc kubenswrapper[4791]: I0218 01:50:01.115020 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mstx5" event={"ID":"2cd90cb1-9274-48f7-9a17-6a6bd525e7e3","Type":"ContainerStarted","Data":"52a183b39e7393122742d6209c11abbf5576a998bc41b698ad011d14b35eb2cf"} Feb 18 01:50:03 crc kubenswrapper[4791]: I0218 01:50:03.142578 4791 generic.go:334] "Generic (PLEG): container finished" podID="2cd90cb1-9274-48f7-9a17-6a6bd525e7e3" containerID="52a183b39e7393122742d6209c11abbf5576a998bc41b698ad011d14b35eb2cf" exitCode=0 Feb 18 01:50:03 crc kubenswrapper[4791]: I0218 01:50:03.142892 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mstx5" event={"ID":"2cd90cb1-9274-48f7-9a17-6a6bd525e7e3","Type":"ContainerDied","Data":"52a183b39e7393122742d6209c11abbf5576a998bc41b698ad011d14b35eb2cf"} Feb 18 01:50:04 crc kubenswrapper[4791]: I0218 01:50:04.179078 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mstx5" event={"ID":"2cd90cb1-9274-48f7-9a17-6a6bd525e7e3","Type":"ContainerStarted","Data":"00a899a7832f03df372481ba9a6dd3197fa2979f7e3ff673570ea15c09fc6c77"} Feb 18 01:50:04 crc kubenswrapper[4791]: I0218 01:50:04.221363 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-mstx5" podStartSLOduration=3.754284801 podStartE2EDuration="7.221337955s" podCreationTimestamp="2026-02-18 01:49:57 +0000 UTC" firstStartedPulling="2026-02-18 01:50:00.101252521 +0000 UTC m=+4541.669265701" lastFinishedPulling="2026-02-18 01:50:03.568305655 +0000 UTC m=+4545.136318855" observedRunningTime="2026-02-18 01:50:04.206229392 +0000 UTC m=+4545.774242592" watchObservedRunningTime="2026-02-18 01:50:04.221337955 +0000 UTC m=+4545.789351135" Feb 18 01:50:06 crc kubenswrapper[4791]: I0218 01:50:06.233600 4791 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/redhat-operators-whwtr"] Feb 18 01:50:06 crc kubenswrapper[4791]: I0218 01:50:06.236871 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-whwtr" Feb 18 01:50:06 crc kubenswrapper[4791]: I0218 01:50:06.264277 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-whwtr"] Feb 18 01:50:06 crc kubenswrapper[4791]: I0218 01:50:06.363249 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3591c46f-e71e-4a0c-afe1-67fb944d73aa-utilities\") pod \"redhat-operators-whwtr\" (UID: \"3591c46f-e71e-4a0c-afe1-67fb944d73aa\") " pod="openshift-marketplace/redhat-operators-whwtr" Feb 18 01:50:06 crc kubenswrapper[4791]: I0218 01:50:06.363619 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7tvwm\" (UniqueName: \"kubernetes.io/projected/3591c46f-e71e-4a0c-afe1-67fb944d73aa-kube-api-access-7tvwm\") pod \"redhat-operators-whwtr\" (UID: \"3591c46f-e71e-4a0c-afe1-67fb944d73aa\") " pod="openshift-marketplace/redhat-operators-whwtr" Feb 18 01:50:06 crc kubenswrapper[4791]: I0218 01:50:06.363809 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3591c46f-e71e-4a0c-afe1-67fb944d73aa-catalog-content\") pod \"redhat-operators-whwtr\" (UID: \"3591c46f-e71e-4a0c-afe1-67fb944d73aa\") " pod="openshift-marketplace/redhat-operators-whwtr" Feb 18 01:50:06 crc kubenswrapper[4791]: I0218 01:50:06.465997 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7tvwm\" (UniqueName: \"kubernetes.io/projected/3591c46f-e71e-4a0c-afe1-67fb944d73aa-kube-api-access-7tvwm\") pod \"redhat-operators-whwtr\" (UID: \"3591c46f-e71e-4a0c-afe1-67fb944d73aa\") " pod="openshift-marketplace/redhat-operators-whwtr" Feb 18 01:50:06 crc kubenswrapper[4791]: I0218 01:50:06.466341 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3591c46f-e71e-4a0c-afe1-67fb944d73aa-catalog-content\") pod \"redhat-operators-whwtr\" (UID: \"3591c46f-e71e-4a0c-afe1-67fb944d73aa\") " pod="openshift-marketplace/redhat-operators-whwtr" Feb 18 01:50:06 crc kubenswrapper[4791]: I0218 01:50:06.466690 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3591c46f-e71e-4a0c-afe1-67fb944d73aa-utilities\") pod \"redhat-operators-whwtr\" (UID: \"3591c46f-e71e-4a0c-afe1-67fb944d73aa\") " pod="openshift-marketplace/redhat-operators-whwtr" Feb 18 01:50:06 crc kubenswrapper[4791]: I0218 01:50:06.466825 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3591c46f-e71e-4a0c-afe1-67fb944d73aa-catalog-content\") pod \"redhat-operators-whwtr\" (UID: \"3591c46f-e71e-4a0c-afe1-67fb944d73aa\") " pod="openshift-marketplace/redhat-operators-whwtr" Feb 18 01:50:06 crc kubenswrapper[4791]: I0218 01:50:06.467250 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3591c46f-e71e-4a0c-afe1-67fb944d73aa-utilities\") pod \"redhat-operators-whwtr\" (UID: \"3591c46f-e71e-4a0c-afe1-67fb944d73aa\") " 
pod="openshift-marketplace/redhat-operators-whwtr" Feb 18 01:50:06 crc kubenswrapper[4791]: I0218 01:50:06.496244 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7tvwm\" (UniqueName: \"kubernetes.io/projected/3591c46f-e71e-4a0c-afe1-67fb944d73aa-kube-api-access-7tvwm\") pod \"redhat-operators-whwtr\" (UID: \"3591c46f-e71e-4a0c-afe1-67fb944d73aa\") " pod="openshift-marketplace/redhat-operators-whwtr" Feb 18 01:50:06 crc kubenswrapper[4791]: I0218 01:50:06.578741 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-whwtr" Feb 18 01:50:07 crc kubenswrapper[4791]: I0218 01:50:07.138332 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-whwtr"] Feb 18 01:50:07 crc kubenswrapper[4791]: I0218 01:50:07.217647 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-whwtr" event={"ID":"3591c46f-e71e-4a0c-afe1-67fb944d73aa","Type":"ContainerStarted","Data":"7803d183cee497ee653068fddb302c69c116375aa356bc4c2714f63c78a75a89"} Feb 18 01:50:08 crc kubenswrapper[4791]: I0218 01:50:08.228930 4791 generic.go:334] "Generic (PLEG): container finished" podID="3591c46f-e71e-4a0c-afe1-67fb944d73aa" containerID="ce42fe960439e4660989ee385055fc5380a3cd0710ba987e4cdda0b1624b9bdd" exitCode=0 Feb 18 01:50:08 crc kubenswrapper[4791]: I0218 01:50:08.229011 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-whwtr" event={"ID":"3591c46f-e71e-4a0c-afe1-67fb944d73aa","Type":"ContainerDied","Data":"ce42fe960439e4660989ee385055fc5380a3cd0710ba987e4cdda0b1624b9bdd"} Feb 18 01:50:08 crc kubenswrapper[4791]: I0218 01:50:08.351961 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-mstx5" Feb 18 01:50:08 crc kubenswrapper[4791]: I0218 01:50:08.352026 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-mstx5" Feb 18 01:50:08 crc kubenswrapper[4791]: I0218 01:50:08.403974 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-mstx5" Feb 18 01:50:09 crc kubenswrapper[4791]: E0218 01:50:09.070338 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:50:09 crc kubenswrapper[4791]: I0218 01:50:09.239994 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-whwtr" event={"ID":"3591c46f-e71e-4a0c-afe1-67fb944d73aa","Type":"ContainerStarted","Data":"56d73765d346d518d009731abed649cd0097a3a2f41e353e5077b0e870779249"} Feb 18 01:50:09 crc kubenswrapper[4791]: I0218 01:50:09.288594 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-mstx5" Feb 18 01:50:10 crc kubenswrapper[4791]: E0218 01:50:10.062977 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" 
podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:50:10 crc kubenswrapper[4791]: I0218 01:50:10.803244 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-mstx5"] Feb 18 01:50:11 crc kubenswrapper[4791]: I0218 01:50:11.262059 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-mstx5" podUID="2cd90cb1-9274-48f7-9a17-6a6bd525e7e3" containerName="registry-server" containerID="cri-o://00a899a7832f03df372481ba9a6dd3197fa2979f7e3ff673570ea15c09fc6c77" gracePeriod=2 Feb 18 01:50:11 crc kubenswrapper[4791]: I0218 01:50:11.841406 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mstx5" Feb 18 01:50:12 crc kubenswrapper[4791]: I0218 01:50:12.003019 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2cd90cb1-9274-48f7-9a17-6a6bd525e7e3-catalog-content\") pod \"2cd90cb1-9274-48f7-9a17-6a6bd525e7e3\" (UID: \"2cd90cb1-9274-48f7-9a17-6a6bd525e7e3\") " Feb 18 01:50:12 crc kubenswrapper[4791]: I0218 01:50:12.003264 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2cd90cb1-9274-48f7-9a17-6a6bd525e7e3-utilities\") pod \"2cd90cb1-9274-48f7-9a17-6a6bd525e7e3\" (UID: \"2cd90cb1-9274-48f7-9a17-6a6bd525e7e3\") " Feb 18 01:50:12 crc kubenswrapper[4791]: I0218 01:50:12.003398 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zhrz7\" (UniqueName: \"kubernetes.io/projected/2cd90cb1-9274-48f7-9a17-6a6bd525e7e3-kube-api-access-zhrz7\") pod \"2cd90cb1-9274-48f7-9a17-6a6bd525e7e3\" (UID: \"2cd90cb1-9274-48f7-9a17-6a6bd525e7e3\") " Feb 18 01:50:12 crc kubenswrapper[4791]: I0218 01:50:12.003663 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2cd90cb1-9274-48f7-9a17-6a6bd525e7e3-utilities" (OuterVolumeSpecName: "utilities") pod "2cd90cb1-9274-48f7-9a17-6a6bd525e7e3" (UID: "2cd90cb1-9274-48f7-9a17-6a6bd525e7e3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:50:12 crc kubenswrapper[4791]: I0218 01:50:12.004046 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2cd90cb1-9274-48f7-9a17-6a6bd525e7e3-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 01:50:12 crc kubenswrapper[4791]: I0218 01:50:12.008756 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2cd90cb1-9274-48f7-9a17-6a6bd525e7e3-kube-api-access-zhrz7" (OuterVolumeSpecName: "kube-api-access-zhrz7") pod "2cd90cb1-9274-48f7-9a17-6a6bd525e7e3" (UID: "2cd90cb1-9274-48f7-9a17-6a6bd525e7e3"). InnerVolumeSpecName "kube-api-access-zhrz7". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:50:12 crc kubenswrapper[4791]: I0218 01:50:12.062903 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2cd90cb1-9274-48f7-9a17-6a6bd525e7e3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2cd90cb1-9274-48f7-9a17-6a6bd525e7e3" (UID: "2cd90cb1-9274-48f7-9a17-6a6bd525e7e3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:50:12 crc kubenswrapper[4791]: I0218 01:50:12.105561 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zhrz7\" (UniqueName: \"kubernetes.io/projected/2cd90cb1-9274-48f7-9a17-6a6bd525e7e3-kube-api-access-zhrz7\") on node \"crc\" DevicePath \"\"" Feb 18 01:50:12 crc kubenswrapper[4791]: I0218 01:50:12.105595 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2cd90cb1-9274-48f7-9a17-6a6bd525e7e3-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 01:50:12 crc kubenswrapper[4791]: I0218 01:50:12.272610 4791 generic.go:334] "Generic (PLEG): container finished" podID="2cd90cb1-9274-48f7-9a17-6a6bd525e7e3" containerID="00a899a7832f03df372481ba9a6dd3197fa2979f7e3ff673570ea15c09fc6c77" exitCode=0 Feb 18 01:50:12 crc kubenswrapper[4791]: I0218 01:50:12.272653 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mstx5" event={"ID":"2cd90cb1-9274-48f7-9a17-6a6bd525e7e3","Type":"ContainerDied","Data":"00a899a7832f03df372481ba9a6dd3197fa2979f7e3ff673570ea15c09fc6c77"} Feb 18 01:50:12 crc kubenswrapper[4791]: I0218 01:50:12.272679 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mstx5" Feb 18 01:50:12 crc kubenswrapper[4791]: I0218 01:50:12.272690 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mstx5" event={"ID":"2cd90cb1-9274-48f7-9a17-6a6bd525e7e3","Type":"ContainerDied","Data":"d48de60e7d55393534a2f4ccdbfd69c9bea274f55815657fa5fdcdc46df9b8b7"} Feb 18 01:50:12 crc kubenswrapper[4791]: I0218 01:50:12.272708 4791 scope.go:117] "RemoveContainer" containerID="00a899a7832f03df372481ba9a6dd3197fa2979f7e3ff673570ea15c09fc6c77" Feb 18 01:50:12 crc kubenswrapper[4791]: I0218 01:50:12.307197 4791 scope.go:117] "RemoveContainer" containerID="52a183b39e7393122742d6209c11abbf5576a998bc41b698ad011d14b35eb2cf" Feb 18 01:50:12 crc kubenswrapper[4791]: I0218 01:50:12.312069 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-mstx5"] Feb 18 01:50:12 crc kubenswrapper[4791]: I0218 01:50:12.328954 4791 scope.go:117] "RemoveContainer" containerID="9ab89a41e370f17709c0bed37962d24b8bfda18c21927668c9a286ca23ef84d8" Feb 18 01:50:12 crc kubenswrapper[4791]: I0218 01:50:12.330488 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-mstx5"] Feb 18 01:50:12 crc kubenswrapper[4791]: I0218 01:50:12.392715 4791 scope.go:117] "RemoveContainer" containerID="00a899a7832f03df372481ba9a6dd3197fa2979f7e3ff673570ea15c09fc6c77" Feb 18 01:50:12 crc kubenswrapper[4791]: E0218 01:50:12.398466 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"00a899a7832f03df372481ba9a6dd3197fa2979f7e3ff673570ea15c09fc6c77\": container with ID starting with 00a899a7832f03df372481ba9a6dd3197fa2979f7e3ff673570ea15c09fc6c77 not found: ID does not exist" containerID="00a899a7832f03df372481ba9a6dd3197fa2979f7e3ff673570ea15c09fc6c77" Feb 18 01:50:12 crc kubenswrapper[4791]: I0218 01:50:12.398512 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"00a899a7832f03df372481ba9a6dd3197fa2979f7e3ff673570ea15c09fc6c77"} err="failed to get container status 
\"00a899a7832f03df372481ba9a6dd3197fa2979f7e3ff673570ea15c09fc6c77\": rpc error: code = NotFound desc = could not find container \"00a899a7832f03df372481ba9a6dd3197fa2979f7e3ff673570ea15c09fc6c77\": container with ID starting with 00a899a7832f03df372481ba9a6dd3197fa2979f7e3ff673570ea15c09fc6c77 not found: ID does not exist" Feb 18 01:50:12 crc kubenswrapper[4791]: I0218 01:50:12.398540 4791 scope.go:117] "RemoveContainer" containerID="52a183b39e7393122742d6209c11abbf5576a998bc41b698ad011d14b35eb2cf" Feb 18 01:50:12 crc kubenswrapper[4791]: E0218 01:50:12.399082 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"52a183b39e7393122742d6209c11abbf5576a998bc41b698ad011d14b35eb2cf\": container with ID starting with 52a183b39e7393122742d6209c11abbf5576a998bc41b698ad011d14b35eb2cf not found: ID does not exist" containerID="52a183b39e7393122742d6209c11abbf5576a998bc41b698ad011d14b35eb2cf" Feb 18 01:50:12 crc kubenswrapper[4791]: I0218 01:50:12.399111 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"52a183b39e7393122742d6209c11abbf5576a998bc41b698ad011d14b35eb2cf"} err="failed to get container status \"52a183b39e7393122742d6209c11abbf5576a998bc41b698ad011d14b35eb2cf\": rpc error: code = NotFound desc = could not find container \"52a183b39e7393122742d6209c11abbf5576a998bc41b698ad011d14b35eb2cf\": container with ID starting with 52a183b39e7393122742d6209c11abbf5576a998bc41b698ad011d14b35eb2cf not found: ID does not exist" Feb 18 01:50:12 crc kubenswrapper[4791]: I0218 01:50:12.399130 4791 scope.go:117] "RemoveContainer" containerID="9ab89a41e370f17709c0bed37962d24b8bfda18c21927668c9a286ca23ef84d8" Feb 18 01:50:12 crc kubenswrapper[4791]: E0218 01:50:12.399420 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ab89a41e370f17709c0bed37962d24b8bfda18c21927668c9a286ca23ef84d8\": container with ID starting with 9ab89a41e370f17709c0bed37962d24b8bfda18c21927668c9a286ca23ef84d8 not found: ID does not exist" containerID="9ab89a41e370f17709c0bed37962d24b8bfda18c21927668c9a286ca23ef84d8" Feb 18 01:50:12 crc kubenswrapper[4791]: I0218 01:50:12.399446 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ab89a41e370f17709c0bed37962d24b8bfda18c21927668c9a286ca23ef84d8"} err="failed to get container status \"9ab89a41e370f17709c0bed37962d24b8bfda18c21927668c9a286ca23ef84d8\": rpc error: code = NotFound desc = could not find container \"9ab89a41e370f17709c0bed37962d24b8bfda18c21927668c9a286ca23ef84d8\": container with ID starting with 9ab89a41e370f17709c0bed37962d24b8bfda18c21927668c9a286ca23ef84d8 not found: ID does not exist" Feb 18 01:50:13 crc kubenswrapper[4791]: I0218 01:50:13.077536 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2cd90cb1-9274-48f7-9a17-6a6bd525e7e3" path="/var/lib/kubelet/pods/2cd90cb1-9274-48f7-9a17-6a6bd525e7e3/volumes" Feb 18 01:50:13 crc kubenswrapper[4791]: I0218 01:50:13.284076 4791 generic.go:334] "Generic (PLEG): container finished" podID="3591c46f-e71e-4a0c-afe1-67fb944d73aa" containerID="56d73765d346d518d009731abed649cd0097a3a2f41e353e5077b0e870779249" exitCode=0 Feb 18 01:50:13 crc kubenswrapper[4791]: I0218 01:50:13.284129 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-whwtr" 
event={"ID":"3591c46f-e71e-4a0c-afe1-67fb944d73aa","Type":"ContainerDied","Data":"56d73765d346d518d009731abed649cd0097a3a2f41e353e5077b0e870779249"} Feb 18 01:50:15 crc kubenswrapper[4791]: I0218 01:50:15.319940 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-whwtr" event={"ID":"3591c46f-e71e-4a0c-afe1-67fb944d73aa","Type":"ContainerStarted","Data":"a355dda5d140210a69a7a4c0de448f0a5e4902bd26474314913c2f815584da60"} Feb 18 01:50:15 crc kubenswrapper[4791]: I0218 01:50:15.361237 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-whwtr" podStartSLOduration=3.942573428 podStartE2EDuration="9.361217363s" podCreationTimestamp="2026-02-18 01:50:06 +0000 UTC" firstStartedPulling="2026-02-18 01:50:08.232086428 +0000 UTC m=+4549.800099588" lastFinishedPulling="2026-02-18 01:50:13.650730353 +0000 UTC m=+4555.218743523" observedRunningTime="2026-02-18 01:50:15.352002011 +0000 UTC m=+4556.920015191" watchObservedRunningTime="2026-02-18 01:50:15.361217363 +0000 UTC m=+4556.929230533" Feb 18 01:50:16 crc kubenswrapper[4791]: I0218 01:50:16.578936 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-whwtr" Feb 18 01:50:16 crc kubenswrapper[4791]: I0218 01:50:16.579284 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-whwtr" Feb 18 01:50:17 crc kubenswrapper[4791]: I0218 01:50:17.738033 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-whwtr" podUID="3591c46f-e71e-4a0c-afe1-67fb944d73aa" containerName="registry-server" probeResult="failure" output=< Feb 18 01:50:17 crc kubenswrapper[4791]: timeout: failed to connect service ":50051" within 1s Feb 18 01:50:17 crc kubenswrapper[4791]: > Feb 18 01:50:21 crc kubenswrapper[4791]: E0218 01:50:21.063500 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:50:24 crc kubenswrapper[4791]: E0218 01:50:24.063535 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:50:26 crc kubenswrapper[4791]: I0218 01:50:26.624972 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-whwtr" Feb 18 01:50:26 crc kubenswrapper[4791]: I0218 01:50:26.689014 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-whwtr" Feb 18 01:50:26 crc kubenswrapper[4791]: I0218 01:50:26.800025 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 01:50:26 crc kubenswrapper[4791]: I0218 01:50:26.800083 4791 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 01:50:26 crc kubenswrapper[4791]: I0218 01:50:26.800126 4791 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" Feb 18 01:50:26 crc kubenswrapper[4791]: I0218 01:50:26.800941 4791 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"013bfa5afe6249afde24459696ad10cac9297eef278d7d81abf4388dec91ac8a"} pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 18 01:50:26 crc kubenswrapper[4791]: I0218 01:50:26.800998 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" containerID="cri-o://013bfa5afe6249afde24459696ad10cac9297eef278d7d81abf4388dec91ac8a" gracePeriod=600 Feb 18 01:50:26 crc kubenswrapper[4791]: I0218 01:50:26.874025 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-whwtr"] Feb 18 01:50:28 crc kubenswrapper[4791]: E0218 01:50:28.012851 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:50:28 crc kubenswrapper[4791]: I0218 01:50:28.486379 4791 generic.go:334] "Generic (PLEG): container finished" podID="0b31a333-8f95-459c-8135-e91e557c4c85" containerID="013bfa5afe6249afde24459696ad10cac9297eef278d7d81abf4388dec91ac8a" exitCode=0 Feb 18 01:50:28 crc kubenswrapper[4791]: I0218 01:50:28.486459 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerDied","Data":"013bfa5afe6249afde24459696ad10cac9297eef278d7d81abf4388dec91ac8a"} Feb 18 01:50:28 crc kubenswrapper[4791]: I0218 01:50:28.486528 4791 scope.go:117] "RemoveContainer" containerID="1ee7d20c8be862193aa0a7ef6ac2c6c603cad1fe6483f1d7f4e49125d0cb2732" Feb 18 01:50:28 crc kubenswrapper[4791]: I0218 01:50:28.486609 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-whwtr" podUID="3591c46f-e71e-4a0c-afe1-67fb944d73aa" containerName="registry-server" containerID="cri-o://a355dda5d140210a69a7a4c0de448f0a5e4902bd26474314913c2f815584da60" gracePeriod=2 Feb 18 01:50:28 crc kubenswrapper[4791]: I0218 01:50:28.487537 4791 scope.go:117] "RemoveContainer" containerID="013bfa5afe6249afde24459696ad10cac9297eef278d7d81abf4388dec91ac8a" Feb 18 01:50:28 crc kubenswrapper[4791]: E0218 01:50:28.488069 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:50:29 crc kubenswrapper[4791]: I0218 01:50:29.050653 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-whwtr" Feb 18 01:50:29 crc kubenswrapper[4791]: I0218 01:50:29.135726 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3591c46f-e71e-4a0c-afe1-67fb944d73aa-catalog-content\") pod \"3591c46f-e71e-4a0c-afe1-67fb944d73aa\" (UID: \"3591c46f-e71e-4a0c-afe1-67fb944d73aa\") " Feb 18 01:50:29 crc kubenswrapper[4791]: I0218 01:50:29.136341 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3591c46f-e71e-4a0c-afe1-67fb944d73aa-utilities\") pod \"3591c46f-e71e-4a0c-afe1-67fb944d73aa\" (UID: \"3591c46f-e71e-4a0c-afe1-67fb944d73aa\") " Feb 18 01:50:29 crc kubenswrapper[4791]: I0218 01:50:29.136548 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7tvwm\" (UniqueName: \"kubernetes.io/projected/3591c46f-e71e-4a0c-afe1-67fb944d73aa-kube-api-access-7tvwm\") pod \"3591c46f-e71e-4a0c-afe1-67fb944d73aa\" (UID: \"3591c46f-e71e-4a0c-afe1-67fb944d73aa\") " Feb 18 01:50:29 crc kubenswrapper[4791]: I0218 01:50:29.137038 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3591c46f-e71e-4a0c-afe1-67fb944d73aa-utilities" (OuterVolumeSpecName: "utilities") pod "3591c46f-e71e-4a0c-afe1-67fb944d73aa" (UID: "3591c46f-e71e-4a0c-afe1-67fb944d73aa"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:50:29 crc kubenswrapper[4791]: I0218 01:50:29.137907 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3591c46f-e71e-4a0c-afe1-67fb944d73aa-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 01:50:29 crc kubenswrapper[4791]: I0218 01:50:29.143726 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3591c46f-e71e-4a0c-afe1-67fb944d73aa-kube-api-access-7tvwm" (OuterVolumeSpecName: "kube-api-access-7tvwm") pod "3591c46f-e71e-4a0c-afe1-67fb944d73aa" (UID: "3591c46f-e71e-4a0c-afe1-67fb944d73aa"). InnerVolumeSpecName "kube-api-access-7tvwm". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:50:29 crc kubenswrapper[4791]: I0218 01:50:29.240473 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7tvwm\" (UniqueName: \"kubernetes.io/projected/3591c46f-e71e-4a0c-afe1-67fb944d73aa-kube-api-access-7tvwm\") on node \"crc\" DevicePath \"\"" Feb 18 01:50:29 crc kubenswrapper[4791]: I0218 01:50:29.274636 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3591c46f-e71e-4a0c-afe1-67fb944d73aa-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3591c46f-e71e-4a0c-afe1-67fb944d73aa" (UID: "3591c46f-e71e-4a0c-afe1-67fb944d73aa"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:50:29 crc kubenswrapper[4791]: I0218 01:50:29.343422 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3591c46f-e71e-4a0c-afe1-67fb944d73aa-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 01:50:29 crc kubenswrapper[4791]: I0218 01:50:29.500642 4791 generic.go:334] "Generic (PLEG): container finished" podID="3591c46f-e71e-4a0c-afe1-67fb944d73aa" containerID="a355dda5d140210a69a7a4c0de448f0a5e4902bd26474314913c2f815584da60" exitCode=0 Feb 18 01:50:29 crc kubenswrapper[4791]: I0218 01:50:29.500676 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-whwtr" event={"ID":"3591c46f-e71e-4a0c-afe1-67fb944d73aa","Type":"ContainerDied","Data":"a355dda5d140210a69a7a4c0de448f0a5e4902bd26474314913c2f815584da60"} Feb 18 01:50:29 crc kubenswrapper[4791]: I0218 01:50:29.500700 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-whwtr" event={"ID":"3591c46f-e71e-4a0c-afe1-67fb944d73aa","Type":"ContainerDied","Data":"7803d183cee497ee653068fddb302c69c116375aa356bc4c2714f63c78a75a89"} Feb 18 01:50:29 crc kubenswrapper[4791]: I0218 01:50:29.500719 4791 scope.go:117] "RemoveContainer" containerID="a355dda5d140210a69a7a4c0de448f0a5e4902bd26474314913c2f815584da60" Feb 18 01:50:29 crc kubenswrapper[4791]: I0218 01:50:29.500838 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-whwtr" Feb 18 01:50:29 crc kubenswrapper[4791]: I0218 01:50:29.533247 4791 scope.go:117] "RemoveContainer" containerID="56d73765d346d518d009731abed649cd0097a3a2f41e353e5077b0e870779249" Feb 18 01:50:29 crc kubenswrapper[4791]: I0218 01:50:29.542163 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-whwtr"] Feb 18 01:50:29 crc kubenswrapper[4791]: I0218 01:50:29.555692 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-whwtr"] Feb 18 01:50:29 crc kubenswrapper[4791]: I0218 01:50:29.567576 4791 scope.go:117] "RemoveContainer" containerID="ce42fe960439e4660989ee385055fc5380a3cd0710ba987e4cdda0b1624b9bdd" Feb 18 01:50:29 crc kubenswrapper[4791]: I0218 01:50:29.631688 4791 scope.go:117] "RemoveContainer" containerID="a355dda5d140210a69a7a4c0de448f0a5e4902bd26474314913c2f815584da60" Feb 18 01:50:29 crc kubenswrapper[4791]: E0218 01:50:29.632204 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a355dda5d140210a69a7a4c0de448f0a5e4902bd26474314913c2f815584da60\": container with ID starting with a355dda5d140210a69a7a4c0de448f0a5e4902bd26474314913c2f815584da60 not found: ID does not exist" containerID="a355dda5d140210a69a7a4c0de448f0a5e4902bd26474314913c2f815584da60" Feb 18 01:50:29 crc kubenswrapper[4791]: I0218 01:50:29.632258 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a355dda5d140210a69a7a4c0de448f0a5e4902bd26474314913c2f815584da60"} err="failed to get container status \"a355dda5d140210a69a7a4c0de448f0a5e4902bd26474314913c2f815584da60\": rpc error: code = NotFound desc = could not find container \"a355dda5d140210a69a7a4c0de448f0a5e4902bd26474314913c2f815584da60\": container with ID starting with a355dda5d140210a69a7a4c0de448f0a5e4902bd26474314913c2f815584da60 not found: ID does not exist" Feb 18 01:50:29 crc 
kubenswrapper[4791]: I0218 01:50:29.632290 4791 scope.go:117] "RemoveContainer" containerID="56d73765d346d518d009731abed649cd0097a3a2f41e353e5077b0e870779249" Feb 18 01:50:29 crc kubenswrapper[4791]: E0218 01:50:29.632602 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"56d73765d346d518d009731abed649cd0097a3a2f41e353e5077b0e870779249\": container with ID starting with 56d73765d346d518d009731abed649cd0097a3a2f41e353e5077b0e870779249 not found: ID does not exist" containerID="56d73765d346d518d009731abed649cd0097a3a2f41e353e5077b0e870779249" Feb 18 01:50:29 crc kubenswrapper[4791]: I0218 01:50:29.632634 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56d73765d346d518d009731abed649cd0097a3a2f41e353e5077b0e870779249"} err="failed to get container status \"56d73765d346d518d009731abed649cd0097a3a2f41e353e5077b0e870779249\": rpc error: code = NotFound desc = could not find container \"56d73765d346d518d009731abed649cd0097a3a2f41e353e5077b0e870779249\": container with ID starting with 56d73765d346d518d009731abed649cd0097a3a2f41e353e5077b0e870779249 not found: ID does not exist" Feb 18 01:50:29 crc kubenswrapper[4791]: I0218 01:50:29.632653 4791 scope.go:117] "RemoveContainer" containerID="ce42fe960439e4660989ee385055fc5380a3cd0710ba987e4cdda0b1624b9bdd" Feb 18 01:50:29 crc kubenswrapper[4791]: E0218 01:50:29.633216 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ce42fe960439e4660989ee385055fc5380a3cd0710ba987e4cdda0b1624b9bdd\": container with ID starting with ce42fe960439e4660989ee385055fc5380a3cd0710ba987e4cdda0b1624b9bdd not found: ID does not exist" containerID="ce42fe960439e4660989ee385055fc5380a3cd0710ba987e4cdda0b1624b9bdd" Feb 18 01:50:29 crc kubenswrapper[4791]: I0218 01:50:29.633242 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce42fe960439e4660989ee385055fc5380a3cd0710ba987e4cdda0b1624b9bdd"} err="failed to get container status \"ce42fe960439e4660989ee385055fc5380a3cd0710ba987e4cdda0b1624b9bdd\": rpc error: code = NotFound desc = could not find container \"ce42fe960439e4660989ee385055fc5380a3cd0710ba987e4cdda0b1624b9bdd\": container with ID starting with ce42fe960439e4660989ee385055fc5380a3cd0710ba987e4cdda0b1624b9bdd not found: ID does not exist" Feb 18 01:50:31 crc kubenswrapper[4791]: I0218 01:50:31.082284 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3591c46f-e71e-4a0c-afe1-67fb944d73aa" path="/var/lib/kubelet/pods/3591c46f-e71e-4a0c-afe1-67fb944d73aa/volumes" Feb 18 01:50:35 crc kubenswrapper[4791]: E0218 01:50:35.063620 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:50:37 crc kubenswrapper[4791]: E0218 01:50:37.063347 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:50:41 crc kubenswrapper[4791]: 
I0218 01:50:41.061761 4791 scope.go:117] "RemoveContainer" containerID="013bfa5afe6249afde24459696ad10cac9297eef278d7d81abf4388dec91ac8a" Feb 18 01:50:41 crc kubenswrapper[4791]: E0218 01:50:41.062730 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:50:46 crc kubenswrapper[4791]: E0218 01:50:46.068146 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:50:51 crc kubenswrapper[4791]: E0218 01:50:51.063775 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:50:55 crc kubenswrapper[4791]: I0218 01:50:55.062714 4791 scope.go:117] "RemoveContainer" containerID="013bfa5afe6249afde24459696ad10cac9297eef278d7d81abf4388dec91ac8a" Feb 18 01:50:55 crc kubenswrapper[4791]: E0218 01:50:55.064270 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:50:57 crc kubenswrapper[4791]: E0218 01:50:57.063750 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:51:05 crc kubenswrapper[4791]: E0218 01:51:05.063186 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:51:08 crc kubenswrapper[4791]: I0218 01:51:08.062763 4791 scope.go:117] "RemoveContainer" containerID="013bfa5afe6249afde24459696ad10cac9297eef278d7d81abf4388dec91ac8a" Feb 18 01:51:08 crc kubenswrapper[4791]: E0218 01:51:08.063770 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" 
podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:51:10 crc kubenswrapper[4791]: E0218 01:51:10.065334 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:51:17 crc kubenswrapper[4791]: E0218 01:51:17.064762 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:51:21 crc kubenswrapper[4791]: E0218 01:51:21.065474 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:51:23 crc kubenswrapper[4791]: I0218 01:51:23.061649 4791 scope.go:117] "RemoveContainer" containerID="013bfa5afe6249afde24459696ad10cac9297eef278d7d81abf4388dec91ac8a" Feb 18 01:51:23 crc kubenswrapper[4791]: E0218 01:51:23.062364 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:51:28 crc kubenswrapper[4791]: E0218 01:51:28.064959 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:51:32 crc kubenswrapper[4791]: E0218 01:51:32.076384 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:51:37 crc kubenswrapper[4791]: I0218 01:51:37.062477 4791 scope.go:117] "RemoveContainer" containerID="013bfa5afe6249afde24459696ad10cac9297eef278d7d81abf4388dec91ac8a" Feb 18 01:51:37 crc kubenswrapper[4791]: E0218 01:51:37.063586 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:51:43 crc kubenswrapper[4791]: E0218 01:51:43.063934 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:51:45 crc kubenswrapper[4791]: E0218 01:51:45.063797 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:51:48 crc kubenswrapper[4791]: I0218 01:51:48.062653 4791 scope.go:117] "RemoveContainer" containerID="013bfa5afe6249afde24459696ad10cac9297eef278d7d81abf4388dec91ac8a" Feb 18 01:51:48 crc kubenswrapper[4791]: E0218 01:51:48.063364 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:51:56 crc kubenswrapper[4791]: E0218 01:51:56.064209 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:51:56 crc kubenswrapper[4791]: E0218 01:51:56.064331 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:52:02 crc kubenswrapper[4791]: I0218 01:52:02.061518 4791 scope.go:117] "RemoveContainer" containerID="013bfa5afe6249afde24459696ad10cac9297eef278d7d81abf4388dec91ac8a" Feb 18 01:52:02 crc kubenswrapper[4791]: E0218 01:52:02.063388 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:52:08 crc kubenswrapper[4791]: E0218 01:52:08.063949 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:52:09 crc kubenswrapper[4791]: E0218 01:52:09.071007 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" 
pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:52:13 crc kubenswrapper[4791]: I0218 01:52:13.061584 4791 scope.go:117] "RemoveContainer" containerID="013bfa5afe6249afde24459696ad10cac9297eef278d7d81abf4388dec91ac8a" Feb 18 01:52:13 crc kubenswrapper[4791]: E0218 01:52:13.062400 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:52:21 crc kubenswrapper[4791]: E0218 01:52:21.064487 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:52:22 crc kubenswrapper[4791]: E0218 01:52:22.063317 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:52:24 crc kubenswrapper[4791]: I0218 01:52:24.062144 4791 scope.go:117] "RemoveContainer" containerID="013bfa5afe6249afde24459696ad10cac9297eef278d7d81abf4388dec91ac8a" Feb 18 01:52:24 crc kubenswrapper[4791]: E0218 01:52:24.062835 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:52:30 crc kubenswrapper[4791]: I0218 01:52:30.343785 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-kwhh6"] Feb 18 01:52:30 crc kubenswrapper[4791]: E0218 01:52:30.344672 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2cd90cb1-9274-48f7-9a17-6a6bd525e7e3" containerName="registry-server" Feb 18 01:52:30 crc kubenswrapper[4791]: I0218 01:52:30.344685 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="2cd90cb1-9274-48f7-9a17-6a6bd525e7e3" containerName="registry-server" Feb 18 01:52:30 crc kubenswrapper[4791]: E0218 01:52:30.344703 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2cd90cb1-9274-48f7-9a17-6a6bd525e7e3" containerName="extract-utilities" Feb 18 01:52:30 crc kubenswrapper[4791]: I0218 01:52:30.344709 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="2cd90cb1-9274-48f7-9a17-6a6bd525e7e3" containerName="extract-utilities" Feb 18 01:52:30 crc kubenswrapper[4791]: E0218 01:52:30.344725 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2cd90cb1-9274-48f7-9a17-6a6bd525e7e3" containerName="extract-content" Feb 18 01:52:30 crc kubenswrapper[4791]: I0218 01:52:30.344732 4791 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="2cd90cb1-9274-48f7-9a17-6a6bd525e7e3" containerName="extract-content" Feb 18 01:52:30 crc kubenswrapper[4791]: E0218 01:52:30.344760 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3591c46f-e71e-4a0c-afe1-67fb944d73aa" containerName="extract-utilities" Feb 18 01:52:30 crc kubenswrapper[4791]: I0218 01:52:30.344766 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="3591c46f-e71e-4a0c-afe1-67fb944d73aa" containerName="extract-utilities" Feb 18 01:52:30 crc kubenswrapper[4791]: E0218 01:52:30.344774 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3591c46f-e71e-4a0c-afe1-67fb944d73aa" containerName="registry-server" Feb 18 01:52:30 crc kubenswrapper[4791]: I0218 01:52:30.344779 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="3591c46f-e71e-4a0c-afe1-67fb944d73aa" containerName="registry-server" Feb 18 01:52:30 crc kubenswrapper[4791]: E0218 01:52:30.344796 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3591c46f-e71e-4a0c-afe1-67fb944d73aa" containerName="extract-content" Feb 18 01:52:30 crc kubenswrapper[4791]: I0218 01:52:30.344801 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="3591c46f-e71e-4a0c-afe1-67fb944d73aa" containerName="extract-content" Feb 18 01:52:30 crc kubenswrapper[4791]: I0218 01:52:30.344992 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="3591c46f-e71e-4a0c-afe1-67fb944d73aa" containerName="registry-server" Feb 18 01:52:30 crc kubenswrapper[4791]: I0218 01:52:30.345012 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="2cd90cb1-9274-48f7-9a17-6a6bd525e7e3" containerName="registry-server" Feb 18 01:52:30 crc kubenswrapper[4791]: I0218 01:52:30.348191 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-kwhh6" Feb 18 01:52:30 crc kubenswrapper[4791]: I0218 01:52:30.359247 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kwhh6"] Feb 18 01:52:30 crc kubenswrapper[4791]: I0218 01:52:30.439524 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tnvxq\" (UniqueName: \"kubernetes.io/projected/777a351a-ca39-46f0-912f-fbbda9651efe-kube-api-access-tnvxq\") pod \"community-operators-kwhh6\" (UID: \"777a351a-ca39-46f0-912f-fbbda9651efe\") " pod="openshift-marketplace/community-operators-kwhh6" Feb 18 01:52:30 crc kubenswrapper[4791]: I0218 01:52:30.439639 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/777a351a-ca39-46f0-912f-fbbda9651efe-utilities\") pod \"community-operators-kwhh6\" (UID: \"777a351a-ca39-46f0-912f-fbbda9651efe\") " pod="openshift-marketplace/community-operators-kwhh6" Feb 18 01:52:30 crc kubenswrapper[4791]: I0218 01:52:30.439771 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/777a351a-ca39-46f0-912f-fbbda9651efe-catalog-content\") pod \"community-operators-kwhh6\" (UID: \"777a351a-ca39-46f0-912f-fbbda9651efe\") " pod="openshift-marketplace/community-operators-kwhh6" Feb 18 01:52:30 crc kubenswrapper[4791]: I0218 01:52:30.541960 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tnvxq\" (UniqueName: \"kubernetes.io/projected/777a351a-ca39-46f0-912f-fbbda9651efe-kube-api-access-tnvxq\") pod \"community-operators-kwhh6\" (UID: \"777a351a-ca39-46f0-912f-fbbda9651efe\") " pod="openshift-marketplace/community-operators-kwhh6" Feb 18 01:52:30 crc kubenswrapper[4791]: I0218 01:52:30.542078 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/777a351a-ca39-46f0-912f-fbbda9651efe-utilities\") pod \"community-operators-kwhh6\" (UID: \"777a351a-ca39-46f0-912f-fbbda9651efe\") " pod="openshift-marketplace/community-operators-kwhh6" Feb 18 01:52:30 crc kubenswrapper[4791]: I0218 01:52:30.542207 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/777a351a-ca39-46f0-912f-fbbda9651efe-catalog-content\") pod \"community-operators-kwhh6\" (UID: \"777a351a-ca39-46f0-912f-fbbda9651efe\") " pod="openshift-marketplace/community-operators-kwhh6" Feb 18 01:52:30 crc kubenswrapper[4791]: I0218 01:52:30.542598 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/777a351a-ca39-46f0-912f-fbbda9651efe-utilities\") pod \"community-operators-kwhh6\" (UID: \"777a351a-ca39-46f0-912f-fbbda9651efe\") " pod="openshift-marketplace/community-operators-kwhh6" Feb 18 01:52:30 crc kubenswrapper[4791]: I0218 01:52:30.542713 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/777a351a-ca39-46f0-912f-fbbda9651efe-catalog-content\") pod \"community-operators-kwhh6\" (UID: \"777a351a-ca39-46f0-912f-fbbda9651efe\") " pod="openshift-marketplace/community-operators-kwhh6" Feb 18 01:52:30 crc kubenswrapper[4791]: I0218 01:52:30.573428 4791 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-tnvxq\" (UniqueName: \"kubernetes.io/projected/777a351a-ca39-46f0-912f-fbbda9651efe-kube-api-access-tnvxq\") pod \"community-operators-kwhh6\" (UID: \"777a351a-ca39-46f0-912f-fbbda9651efe\") " pod="openshift-marketplace/community-operators-kwhh6" Feb 18 01:52:30 crc kubenswrapper[4791]: I0218 01:52:30.676268 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kwhh6" Feb 18 01:52:31 crc kubenswrapper[4791]: I0218 01:52:31.281120 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kwhh6"] Feb 18 01:52:31 crc kubenswrapper[4791]: I0218 01:52:31.899319 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kwhh6" event={"ID":"777a351a-ca39-46f0-912f-fbbda9651efe","Type":"ContainerStarted","Data":"f6b3dc701d6950d3707d6af25819ff4ac4e58b840a8bb1e0e9fbfc68d2b6bb54"} Feb 18 01:52:32 crc kubenswrapper[4791]: I0218 01:52:32.917852 4791 generic.go:334] "Generic (PLEG): container finished" podID="777a351a-ca39-46f0-912f-fbbda9651efe" containerID="0cb4fc63914ff4070c27a4c323e97da47bf36a6609bd0f8e1a57e93fa1f247b5" exitCode=0 Feb 18 01:52:32 crc kubenswrapper[4791]: I0218 01:52:32.917935 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kwhh6" event={"ID":"777a351a-ca39-46f0-912f-fbbda9651efe","Type":"ContainerDied","Data":"0cb4fc63914ff4070c27a4c323e97da47bf36a6609bd0f8e1a57e93fa1f247b5"} Feb 18 01:52:32 crc kubenswrapper[4791]: I0218 01:52:32.921214 4791 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 18 01:52:36 crc kubenswrapper[4791]: E0218 01:52:36.208263 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 01:52:36 crc kubenswrapper[4791]: E0218 01:52:36.209100 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 01:52:36 crc kubenswrapper[4791]: E0218 01:52:36.209290 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lgn7d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-scphk_openstack(68e5e8d6-5771-4045-858a-4a39b2db99f9): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 18 01:52:36 crc kubenswrapper[4791]: E0218 01:52:36.210477 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:52:37 crc kubenswrapper[4791]: E0218 01:52:37.064432 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:52:38 crc kubenswrapper[4791]: I0218 01:52:38.062181 4791 scope.go:117] "RemoveContainer" containerID="013bfa5afe6249afde24459696ad10cac9297eef278d7d81abf4388dec91ac8a" Feb 18 01:52:38 crc kubenswrapper[4791]: E0218 01:52:38.062848 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:52:38 crc kubenswrapper[4791]: I0218 01:52:38.974111 4791 generic.go:334] "Generic (PLEG): container finished" podID="777a351a-ca39-46f0-912f-fbbda9651efe" containerID="22774b70d07132ae1e9d1dcd3bf6e1ab9e7b4193aad7d1d7689c5605f294feda" exitCode=0 Feb 18 01:52:38 crc kubenswrapper[4791]: I0218 01:52:38.974201 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kwhh6" event={"ID":"777a351a-ca39-46f0-912f-fbbda9651efe","Type":"ContainerDied","Data":"22774b70d07132ae1e9d1dcd3bf6e1ab9e7b4193aad7d1d7689c5605f294feda"} Feb 18 01:52:39 crc kubenswrapper[4791]: I0218 01:52:39.985645 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kwhh6" event={"ID":"777a351a-ca39-46f0-912f-fbbda9651efe","Type":"ContainerStarted","Data":"5ff819f4dd1674c81bca57ab43d0ed5ca1f74893a566a4c832ea34199a43789d"} Feb 18 01:52:40 crc kubenswrapper[4791]: I0218 01:52:40.005608 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-kwhh6" podStartSLOduration=3.568329992 podStartE2EDuration="10.005591657s" podCreationTimestamp="2026-02-18 01:52:30 +0000 UTC" firstStartedPulling="2026-02-18 01:52:32.92082509 +0000 UTC m=+4694.488838270" lastFinishedPulling="2026-02-18 01:52:39.358086765 +0000 UTC m=+4700.926099935" observedRunningTime="2026-02-18 01:52:40.00376458 +0000 UTC m=+4701.571777750" watchObservedRunningTime="2026-02-18 01:52:40.005591657 +0000 UTC m=+4701.573604827" Feb 18 01:52:40 crc kubenswrapper[4791]: I0218 01:52:40.677005 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-kwhh6" Feb 18 01:52:40 crc kubenswrapper[4791]: I0218 01:52:40.677404 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-kwhh6" Feb 18 01:52:41 crc kubenswrapper[4791]: I0218 01:52:41.733731 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-kwhh6" podUID="777a351a-ca39-46f0-912f-fbbda9651efe" containerName="registry-server" probeResult="failure" output=< Feb 18 01:52:41 crc kubenswrapper[4791]: timeout: failed to connect service ":50051" within 1s Feb 18 01:52:41 crc kubenswrapper[4791]: > Feb 18 01:52:49 crc 
kubenswrapper[4791]: I0218 01:52:49.069010 4791 scope.go:117] "RemoveContainer" containerID="013bfa5afe6249afde24459696ad10cac9297eef278d7d81abf4388dec91ac8a" Feb 18 01:52:49 crc kubenswrapper[4791]: E0218 01:52:49.069818 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:52:50 crc kubenswrapper[4791]: I0218 01:52:50.723304 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-kwhh6" Feb 18 01:52:50 crc kubenswrapper[4791]: I0218 01:52:50.770846 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-kwhh6" Feb 18 01:52:50 crc kubenswrapper[4791]: I0218 01:52:50.839882 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kwhh6"] Feb 18 01:52:50 crc kubenswrapper[4791]: I0218 01:52:50.963459 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rt86r"] Feb 18 01:52:50 crc kubenswrapper[4791]: I0218 01:52:50.963746 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-rt86r" podUID="dc9c385f-8293-43dc-902b-89632cda2af9" containerName="registry-server" containerID="cri-o://fce7df76572934b9580e55e2f39be6d123f908c71b89cf1fe6c1d537b27c3670" gracePeriod=2 Feb 18 01:52:51 crc kubenswrapper[4791]: E0218 01:52:51.065429 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:52:51 crc kubenswrapper[4791]: E0218 01:52:51.065402 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:52:51 crc kubenswrapper[4791]: I0218 01:52:51.092588 4791 generic.go:334] "Generic (PLEG): container finished" podID="dc9c385f-8293-43dc-902b-89632cda2af9" containerID="fce7df76572934b9580e55e2f39be6d123f908c71b89cf1fe6c1d537b27c3670" exitCode=0 Feb 18 01:52:51 crc kubenswrapper[4791]: I0218 01:52:51.093684 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rt86r" event={"ID":"dc9c385f-8293-43dc-902b-89632cda2af9","Type":"ContainerDied","Data":"fce7df76572934b9580e55e2f39be6d123f908c71b89cf1fe6c1d537b27c3670"} Feb 18 01:52:51 crc kubenswrapper[4791]: I0218 01:52:51.503134 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rt86r" Feb 18 01:52:51 crc kubenswrapper[4791]: I0218 01:52:51.667978 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc9c385f-8293-43dc-902b-89632cda2af9-catalog-content\") pod \"dc9c385f-8293-43dc-902b-89632cda2af9\" (UID: \"dc9c385f-8293-43dc-902b-89632cda2af9\") " Feb 18 01:52:51 crc kubenswrapper[4791]: I0218 01:52:51.668342 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-drp2s\" (UniqueName: \"kubernetes.io/projected/dc9c385f-8293-43dc-902b-89632cda2af9-kube-api-access-drp2s\") pod \"dc9c385f-8293-43dc-902b-89632cda2af9\" (UID: \"dc9c385f-8293-43dc-902b-89632cda2af9\") " Feb 18 01:52:51 crc kubenswrapper[4791]: I0218 01:52:51.668390 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc9c385f-8293-43dc-902b-89632cda2af9-utilities\") pod \"dc9c385f-8293-43dc-902b-89632cda2af9\" (UID: \"dc9c385f-8293-43dc-902b-89632cda2af9\") " Feb 18 01:52:51 crc kubenswrapper[4791]: I0218 01:52:51.669360 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dc9c385f-8293-43dc-902b-89632cda2af9-utilities" (OuterVolumeSpecName: "utilities") pod "dc9c385f-8293-43dc-902b-89632cda2af9" (UID: "dc9c385f-8293-43dc-902b-89632cda2af9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:52:51 crc kubenswrapper[4791]: I0218 01:52:51.674187 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc9c385f-8293-43dc-902b-89632cda2af9-kube-api-access-drp2s" (OuterVolumeSpecName: "kube-api-access-drp2s") pod "dc9c385f-8293-43dc-902b-89632cda2af9" (UID: "dc9c385f-8293-43dc-902b-89632cda2af9"). InnerVolumeSpecName "kube-api-access-drp2s". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:52:51 crc kubenswrapper[4791]: I0218 01:52:51.736307 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dc9c385f-8293-43dc-902b-89632cda2af9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dc9c385f-8293-43dc-902b-89632cda2af9" (UID: "dc9c385f-8293-43dc-902b-89632cda2af9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:52:51 crc kubenswrapper[4791]: I0218 01:52:51.786378 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc9c385f-8293-43dc-902b-89632cda2af9-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 01:52:51 crc kubenswrapper[4791]: I0218 01:52:51.786455 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-drp2s\" (UniqueName: \"kubernetes.io/projected/dc9c385f-8293-43dc-902b-89632cda2af9-kube-api-access-drp2s\") on node \"crc\" DevicePath \"\"" Feb 18 01:52:51 crc kubenswrapper[4791]: I0218 01:52:51.786470 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc9c385f-8293-43dc-902b-89632cda2af9-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 01:52:52 crc kubenswrapper[4791]: I0218 01:52:52.104128 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rt86r" event={"ID":"dc9c385f-8293-43dc-902b-89632cda2af9","Type":"ContainerDied","Data":"cb038567eaaedbb3730c621250847789e096aac4618bc0d5ea17d8b67fda6e3a"} Feb 18 01:52:52 crc kubenswrapper[4791]: I0218 01:52:52.104219 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rt86r" Feb 18 01:52:52 crc kubenswrapper[4791]: I0218 01:52:52.104238 4791 scope.go:117] "RemoveContainer" containerID="fce7df76572934b9580e55e2f39be6d123f908c71b89cf1fe6c1d537b27c3670" Feb 18 01:52:52 crc kubenswrapper[4791]: I0218 01:52:52.142515 4791 scope.go:117] "RemoveContainer" containerID="bbf72b2c9f12dee74f3fcede599f6f8a88a35b9f83d934c686c540c4b3cccc5f" Feb 18 01:52:52 crc kubenswrapper[4791]: I0218 01:52:52.154423 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rt86r"] Feb 18 01:52:52 crc kubenswrapper[4791]: I0218 01:52:52.168133 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-rt86r"] Feb 18 01:52:52 crc kubenswrapper[4791]: I0218 01:52:52.176887 4791 scope.go:117] "RemoveContainer" containerID="ccdb4842ecc160f46564ca299721a5106726a0f4ba35bef5f4d38a28e1f6d883" Feb 18 01:52:53 crc kubenswrapper[4791]: I0218 01:52:53.075346 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc9c385f-8293-43dc-902b-89632cda2af9" path="/var/lib/kubelet/pods/dc9c385f-8293-43dc-902b-89632cda2af9/volumes" Feb 18 01:53:02 crc kubenswrapper[4791]: E0218 01:53:02.063959 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:53:03 crc kubenswrapper[4791]: I0218 01:53:03.061790 4791 scope.go:117] "RemoveContainer" containerID="013bfa5afe6249afde24459696ad10cac9297eef278d7d81abf4388dec91ac8a" Feb 18 01:53:03 crc kubenswrapper[4791]: E0218 01:53:03.062421 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" 
podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:53:04 crc kubenswrapper[4791]: E0218 01:53:04.062867 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:53:17 crc kubenswrapper[4791]: E0218 01:53:17.064499 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:53:17 crc kubenswrapper[4791]: E0218 01:53:17.158496 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 01:53:17 crc kubenswrapper[4791]: E0218 01:53:17.158569 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 01:53:17 crc kubenswrapper[4791]: E0218 01:53:17.159019 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndh699h566hcch79h59h595h58bhb6h7fh594h67fh546h668h56hc8h6chd5h94h599h587hddh64bh57bhc9h68fh7dh57bh65dh64fh68dh59dq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-np7gr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(4b9cec47-aeda-40f0-b83e-46f09ce65e95): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 18 01:53:17 crc kubenswrapper[4791]: E0218 01:53:17.160216 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:53:18 crc kubenswrapper[4791]: I0218 01:53:18.062922 4791 scope.go:117] "RemoveContainer" containerID="013bfa5afe6249afde24459696ad10cac9297eef278d7d81abf4388dec91ac8a" Feb 18 01:53:18 crc kubenswrapper[4791]: E0218 01:53:18.064827 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:53:28 crc kubenswrapper[4791]: E0218 01:53:28.064340 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:53:31 crc kubenswrapper[4791]: E0218 01:53:31.068881 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:53:33 crc kubenswrapper[4791]: I0218 01:53:33.062695 4791 scope.go:117] "RemoveContainer" containerID="013bfa5afe6249afde24459696ad10cac9297eef278d7d81abf4388dec91ac8a" Feb 18 01:53:33 crc kubenswrapper[4791]: E0218 01:53:33.063980 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:53:42 crc kubenswrapper[4791]: E0218 01:53:42.065448 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:53:43 crc kubenswrapper[4791]: E0218 01:53:43.062812 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:53:45 crc kubenswrapper[4791]: I0218 01:53:45.062149 4791 scope.go:117] "RemoveContainer" containerID="013bfa5afe6249afde24459696ad10cac9297eef278d7d81abf4388dec91ac8a" Feb 18 01:53:45 crc kubenswrapper[4791]: E0218 01:53:45.062817 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:53:54 crc kubenswrapper[4791]: E0218 01:53:54.063085 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:53:55 crc kubenswrapper[4791]: E0218 01:53:55.064805 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:53:56 crc kubenswrapper[4791]: I0218 01:53:56.061877 4791 scope.go:117] "RemoveContainer" containerID="013bfa5afe6249afde24459696ad10cac9297eef278d7d81abf4388dec91ac8a" Feb 18 01:53:56 crc kubenswrapper[4791]: E0218 01:53:56.062667 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:54:06 crc kubenswrapper[4791]: E0218 01:54:06.063894 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:54:06 crc kubenswrapper[4791]: E0218 01:54:06.064785 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:54:10 crc kubenswrapper[4791]: I0218 01:54:10.062103 4791 scope.go:117] "RemoveContainer" containerID="013bfa5afe6249afde24459696ad10cac9297eef278d7d81abf4388dec91ac8a" Feb 18 01:54:10 crc kubenswrapper[4791]: E0218 01:54:10.064174 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:54:19 crc kubenswrapper[4791]: E0218 01:54:19.071314 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" 
podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:54:20 crc kubenswrapper[4791]: E0218 01:54:20.064090 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:54:22 crc kubenswrapper[4791]: I0218 01:54:22.062189 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rd8cm"] Feb 18 01:54:22 crc kubenswrapper[4791]: E0218 01:54:22.064978 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc9c385f-8293-43dc-902b-89632cda2af9" containerName="registry-server" Feb 18 01:54:22 crc kubenswrapper[4791]: I0218 01:54:22.065109 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc9c385f-8293-43dc-902b-89632cda2af9" containerName="registry-server" Feb 18 01:54:22 crc kubenswrapper[4791]: E0218 01:54:22.065261 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc9c385f-8293-43dc-902b-89632cda2af9" containerName="extract-utilities" Feb 18 01:54:22 crc kubenswrapper[4791]: I0218 01:54:22.065279 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc9c385f-8293-43dc-902b-89632cda2af9" containerName="extract-utilities" Feb 18 01:54:22 crc kubenswrapper[4791]: E0218 01:54:22.065321 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc9c385f-8293-43dc-902b-89632cda2af9" containerName="extract-content" Feb 18 01:54:22 crc kubenswrapper[4791]: I0218 01:54:22.065332 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc9c385f-8293-43dc-902b-89632cda2af9" containerName="extract-content" Feb 18 01:54:22 crc kubenswrapper[4791]: I0218 01:54:22.066172 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc9c385f-8293-43dc-902b-89632cda2af9" containerName="registry-server" Feb 18 01:54:22 crc kubenswrapper[4791]: I0218 01:54:22.069959 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rd8cm" Feb 18 01:54:22 crc kubenswrapper[4791]: I0218 01:54:22.086976 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Feb 18 01:54:22 crc kubenswrapper[4791]: I0218 01:54:22.087325 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Feb 18 01:54:22 crc kubenswrapper[4791]: I0218 01:54:22.087441 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-qjdk4" Feb 18 01:54:22 crc kubenswrapper[4791]: I0218 01:54:22.087329 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Feb 18 01:54:22 crc kubenswrapper[4791]: I0218 01:54:22.112709 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rd8cm"] Feb 18 01:54:22 crc kubenswrapper[4791]: I0218 01:54:22.130193 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bec10b7f-b30a-4c42-828c-2d3d86635d33-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-rd8cm\" (UID: \"bec10b7f-b30a-4c42-828c-2d3d86635d33\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rd8cm" Feb 18 01:54:22 crc kubenswrapper[4791]: I0218 01:54:22.130543 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bec10b7f-b30a-4c42-828c-2d3d86635d33-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-rd8cm\" (UID: \"bec10b7f-b30a-4c42-828c-2d3d86635d33\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rd8cm" Feb 18 01:54:22 crc kubenswrapper[4791]: I0218 01:54:22.130610 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fcrjb\" (UniqueName: \"kubernetes.io/projected/bec10b7f-b30a-4c42-828c-2d3d86635d33-kube-api-access-fcrjb\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-rd8cm\" (UID: \"bec10b7f-b30a-4c42-828c-2d3d86635d33\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rd8cm" Feb 18 01:54:22 crc kubenswrapper[4791]: I0218 01:54:22.232612 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bec10b7f-b30a-4c42-828c-2d3d86635d33-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-rd8cm\" (UID: \"bec10b7f-b30a-4c42-828c-2d3d86635d33\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rd8cm" Feb 18 01:54:22 crc kubenswrapper[4791]: I0218 01:54:22.232992 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bec10b7f-b30a-4c42-828c-2d3d86635d33-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-rd8cm\" (UID: \"bec10b7f-b30a-4c42-828c-2d3d86635d33\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rd8cm" Feb 18 01:54:22 crc kubenswrapper[4791]: I0218 01:54:22.233032 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fcrjb\" (UniqueName: 
\"kubernetes.io/projected/bec10b7f-b30a-4c42-828c-2d3d86635d33-kube-api-access-fcrjb\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-rd8cm\" (UID: \"bec10b7f-b30a-4c42-828c-2d3d86635d33\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rd8cm" Feb 18 01:54:22 crc kubenswrapper[4791]: I0218 01:54:22.594487 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bec10b7f-b30a-4c42-828c-2d3d86635d33-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-rd8cm\" (UID: \"bec10b7f-b30a-4c42-828c-2d3d86635d33\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rd8cm" Feb 18 01:54:22 crc kubenswrapper[4791]: I0218 01:54:22.594862 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bec10b7f-b30a-4c42-828c-2d3d86635d33-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-rd8cm\" (UID: \"bec10b7f-b30a-4c42-828c-2d3d86635d33\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rd8cm" Feb 18 01:54:22 crc kubenswrapper[4791]: I0218 01:54:22.595689 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fcrjb\" (UniqueName: \"kubernetes.io/projected/bec10b7f-b30a-4c42-828c-2d3d86635d33-kube-api-access-fcrjb\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-rd8cm\" (UID: \"bec10b7f-b30a-4c42-828c-2d3d86635d33\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rd8cm" Feb 18 01:54:22 crc kubenswrapper[4791]: I0218 01:54:22.706523 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rd8cm" Feb 18 01:54:23 crc kubenswrapper[4791]: I0218 01:54:23.062559 4791 scope.go:117] "RemoveContainer" containerID="013bfa5afe6249afde24459696ad10cac9297eef278d7d81abf4388dec91ac8a" Feb 18 01:54:23 crc kubenswrapper[4791]: E0218 01:54:23.063067 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:54:23 crc kubenswrapper[4791]: W0218 01:54:23.276182 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbec10b7f_b30a_4c42_828c_2d3d86635d33.slice/crio-04dc770dd30b39606345089c9dca65bd1ae7d4aa864233703757ae3e1a3c19a5 WatchSource:0}: Error finding container 04dc770dd30b39606345089c9dca65bd1ae7d4aa864233703757ae3e1a3c19a5: Status 404 returned error can't find the container with id 04dc770dd30b39606345089c9dca65bd1ae7d4aa864233703757ae3e1a3c19a5 Feb 18 01:54:23 crc kubenswrapper[4791]: I0218 01:54:23.279651 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rd8cm"] Feb 18 01:54:24 crc kubenswrapper[4791]: I0218 01:54:24.144762 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rd8cm" 
event={"ID":"bec10b7f-b30a-4c42-828c-2d3d86635d33","Type":"ContainerStarted","Data":"b7e89fb4563120f85c657d1532b2f359002a0217a76fa7ba8d0f4af269a538cc"} Feb 18 01:54:24 crc kubenswrapper[4791]: I0218 01:54:24.145396 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rd8cm" event={"ID":"bec10b7f-b30a-4c42-828c-2d3d86635d33","Type":"ContainerStarted","Data":"04dc770dd30b39606345089c9dca65bd1ae7d4aa864233703757ae3e1a3c19a5"} Feb 18 01:54:24 crc kubenswrapper[4791]: I0218 01:54:24.180091 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rd8cm" podStartSLOduration=1.582047642 podStartE2EDuration="2.180066837s" podCreationTimestamp="2026-02-18 01:54:22 +0000 UTC" firstStartedPulling="2026-02-18 01:54:23.279370522 +0000 UTC m=+4804.847383692" lastFinishedPulling="2026-02-18 01:54:23.877389717 +0000 UTC m=+4805.445402887" observedRunningTime="2026-02-18 01:54:24.162059254 +0000 UTC m=+4805.730072434" watchObservedRunningTime="2026-02-18 01:54:24.180066837 +0000 UTC m=+4805.748080007" Feb 18 01:54:30 crc kubenswrapper[4791]: E0218 01:54:30.063725 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:54:31 crc kubenswrapper[4791]: E0218 01:54:31.082568 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:54:37 crc kubenswrapper[4791]: I0218 01:54:37.061261 4791 scope.go:117] "RemoveContainer" containerID="013bfa5afe6249afde24459696ad10cac9297eef278d7d81abf4388dec91ac8a" Feb 18 01:54:37 crc kubenswrapper[4791]: E0218 01:54:37.062377 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:54:44 crc kubenswrapper[4791]: I0218 01:54:44.831511 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-clg4f"] Feb 18 01:54:44 crc kubenswrapper[4791]: I0218 01:54:44.834355 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-clg4f" Feb 18 01:54:44 crc kubenswrapper[4791]: I0218 01:54:44.846016 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-clg4f"] Feb 18 01:54:45 crc kubenswrapper[4791]: I0218 01:54:45.029769 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb75cd6f-fc27-410d-9166-6a02cdd578b3-catalog-content\") pod \"redhat-marketplace-clg4f\" (UID: \"fb75cd6f-fc27-410d-9166-6a02cdd578b3\") " pod="openshift-marketplace/redhat-marketplace-clg4f" Feb 18 01:54:45 crc kubenswrapper[4791]: I0218 01:54:45.030152 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7zqq\" (UniqueName: \"kubernetes.io/projected/fb75cd6f-fc27-410d-9166-6a02cdd578b3-kube-api-access-w7zqq\") pod \"redhat-marketplace-clg4f\" (UID: \"fb75cd6f-fc27-410d-9166-6a02cdd578b3\") " pod="openshift-marketplace/redhat-marketplace-clg4f" Feb 18 01:54:45 crc kubenswrapper[4791]: I0218 01:54:45.030188 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb75cd6f-fc27-410d-9166-6a02cdd578b3-utilities\") pod \"redhat-marketplace-clg4f\" (UID: \"fb75cd6f-fc27-410d-9166-6a02cdd578b3\") " pod="openshift-marketplace/redhat-marketplace-clg4f" Feb 18 01:54:45 crc kubenswrapper[4791]: E0218 01:54:45.063537 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:54:45 crc kubenswrapper[4791]: I0218 01:54:45.131909 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb75cd6f-fc27-410d-9166-6a02cdd578b3-catalog-content\") pod \"redhat-marketplace-clg4f\" (UID: \"fb75cd6f-fc27-410d-9166-6a02cdd578b3\") " pod="openshift-marketplace/redhat-marketplace-clg4f" Feb 18 01:54:45 crc kubenswrapper[4791]: I0218 01:54:45.131988 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7zqq\" (UniqueName: \"kubernetes.io/projected/fb75cd6f-fc27-410d-9166-6a02cdd578b3-kube-api-access-w7zqq\") pod \"redhat-marketplace-clg4f\" (UID: \"fb75cd6f-fc27-410d-9166-6a02cdd578b3\") " pod="openshift-marketplace/redhat-marketplace-clg4f" Feb 18 01:54:45 crc kubenswrapper[4791]: I0218 01:54:45.132013 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb75cd6f-fc27-410d-9166-6a02cdd578b3-utilities\") pod \"redhat-marketplace-clg4f\" (UID: \"fb75cd6f-fc27-410d-9166-6a02cdd578b3\") " pod="openshift-marketplace/redhat-marketplace-clg4f" Feb 18 01:54:45 crc kubenswrapper[4791]: I0218 01:54:45.132566 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb75cd6f-fc27-410d-9166-6a02cdd578b3-utilities\") pod \"redhat-marketplace-clg4f\" (UID: \"fb75cd6f-fc27-410d-9166-6a02cdd578b3\") " pod="openshift-marketplace/redhat-marketplace-clg4f" Feb 18 01:54:45 crc kubenswrapper[4791]: I0218 01:54:45.132791 4791 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb75cd6f-fc27-410d-9166-6a02cdd578b3-catalog-content\") pod \"redhat-marketplace-clg4f\" (UID: \"fb75cd6f-fc27-410d-9166-6a02cdd578b3\") " pod="openshift-marketplace/redhat-marketplace-clg4f" Feb 18 01:54:45 crc kubenswrapper[4791]: I0218 01:54:45.152887 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w7zqq\" (UniqueName: \"kubernetes.io/projected/fb75cd6f-fc27-410d-9166-6a02cdd578b3-kube-api-access-w7zqq\") pod \"redhat-marketplace-clg4f\" (UID: \"fb75cd6f-fc27-410d-9166-6a02cdd578b3\") " pod="openshift-marketplace/redhat-marketplace-clg4f" Feb 18 01:54:45 crc kubenswrapper[4791]: I0218 01:54:45.453450 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-clg4f" Feb 18 01:54:46 crc kubenswrapper[4791]: I0218 01:54:46.018112 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-clg4f"] Feb 18 01:54:46 crc kubenswrapper[4791]: E0218 01:54:46.063699 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:54:46 crc kubenswrapper[4791]: I0218 01:54:46.409499 4791 generic.go:334] "Generic (PLEG): container finished" podID="fb75cd6f-fc27-410d-9166-6a02cdd578b3" containerID="af29669a78162f6b65e202cc1b7d1a098063b11bfc71af043f98b775a46aa221" exitCode=0 Feb 18 01:54:46 crc kubenswrapper[4791]: I0218 01:54:46.409590 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-clg4f" event={"ID":"fb75cd6f-fc27-410d-9166-6a02cdd578b3","Type":"ContainerDied","Data":"af29669a78162f6b65e202cc1b7d1a098063b11bfc71af043f98b775a46aa221"} Feb 18 01:54:46 crc kubenswrapper[4791]: I0218 01:54:46.410014 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-clg4f" event={"ID":"fb75cd6f-fc27-410d-9166-6a02cdd578b3","Type":"ContainerStarted","Data":"661071a2ea64524f65e841c94d026c3ff8a9b005c7ca0703148e27049340e600"} Feb 18 01:54:48 crc kubenswrapper[4791]: I0218 01:54:48.434235 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-clg4f" event={"ID":"fb75cd6f-fc27-410d-9166-6a02cdd578b3","Type":"ContainerStarted","Data":"c9d125d1071a8ab86419e1ea8932b148351729fc090e230e5cfe42f234457b8e"} Feb 18 01:54:49 crc kubenswrapper[4791]: I0218 01:54:49.446739 4791 generic.go:334] "Generic (PLEG): container finished" podID="fb75cd6f-fc27-410d-9166-6a02cdd578b3" containerID="c9d125d1071a8ab86419e1ea8932b148351729fc090e230e5cfe42f234457b8e" exitCode=0 Feb 18 01:54:49 crc kubenswrapper[4791]: I0218 01:54:49.446790 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-clg4f" event={"ID":"fb75cd6f-fc27-410d-9166-6a02cdd578b3","Type":"ContainerDied","Data":"c9d125d1071a8ab86419e1ea8932b148351729fc090e230e5cfe42f234457b8e"} Feb 18 01:54:50 crc kubenswrapper[4791]: I0218 01:54:50.462294 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-clg4f" 
event={"ID":"fb75cd6f-fc27-410d-9166-6a02cdd578b3","Type":"ContainerStarted","Data":"b7c378ef17f6be03fc0d929c780ffd1e2b6d3d2ca83288a0d9a361f78d7a0f8c"} Feb 18 01:54:50 crc kubenswrapper[4791]: I0218 01:54:50.496766 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-clg4f" podStartSLOduration=3.009530065 podStartE2EDuration="6.496746467s" podCreationTimestamp="2026-02-18 01:54:44 +0000 UTC" firstStartedPulling="2026-02-18 01:54:46.412374298 +0000 UTC m=+4827.980387498" lastFinishedPulling="2026-02-18 01:54:49.89959072 +0000 UTC m=+4831.467603900" observedRunningTime="2026-02-18 01:54:50.485756431 +0000 UTC m=+4832.053769631" watchObservedRunningTime="2026-02-18 01:54:50.496746467 +0000 UTC m=+4832.064759657" Feb 18 01:54:52 crc kubenswrapper[4791]: I0218 01:54:52.061588 4791 scope.go:117] "RemoveContainer" containerID="013bfa5afe6249afde24459696ad10cac9297eef278d7d81abf4388dec91ac8a" Feb 18 01:54:52 crc kubenswrapper[4791]: E0218 01:54:52.062233 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:54:55 crc kubenswrapper[4791]: I0218 01:54:55.454357 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-clg4f" Feb 18 01:54:55 crc kubenswrapper[4791]: I0218 01:54:55.455141 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-clg4f" Feb 18 01:54:55 crc kubenswrapper[4791]: I0218 01:54:55.521281 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-clg4f" Feb 18 01:54:56 crc kubenswrapper[4791]: I0218 01:54:56.580997 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-clg4f" Feb 18 01:54:58 crc kubenswrapper[4791]: E0218 01:54:58.071580 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:54:59 crc kubenswrapper[4791]: I0218 01:54:59.219759 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-clg4f"] Feb 18 01:54:59 crc kubenswrapper[4791]: I0218 01:54:59.220356 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-clg4f" podUID="fb75cd6f-fc27-410d-9166-6a02cdd578b3" containerName="registry-server" containerID="cri-o://b7c378ef17f6be03fc0d929c780ffd1e2b6d3d2ca83288a0d9a361f78d7a0f8c" gracePeriod=2 Feb 18 01:55:00 crc kubenswrapper[4791]: I0218 01:55:00.108027 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-clg4f" Feb 18 01:55:00 crc kubenswrapper[4791]: I0218 01:55:00.255317 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7zqq\" (UniqueName: \"kubernetes.io/projected/fb75cd6f-fc27-410d-9166-6a02cdd578b3-kube-api-access-w7zqq\") pod \"fb75cd6f-fc27-410d-9166-6a02cdd578b3\" (UID: \"fb75cd6f-fc27-410d-9166-6a02cdd578b3\") " Feb 18 01:55:00 crc kubenswrapper[4791]: I0218 01:55:00.255484 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb75cd6f-fc27-410d-9166-6a02cdd578b3-utilities\") pod \"fb75cd6f-fc27-410d-9166-6a02cdd578b3\" (UID: \"fb75cd6f-fc27-410d-9166-6a02cdd578b3\") " Feb 18 01:55:00 crc kubenswrapper[4791]: I0218 01:55:00.255519 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb75cd6f-fc27-410d-9166-6a02cdd578b3-catalog-content\") pod \"fb75cd6f-fc27-410d-9166-6a02cdd578b3\" (UID: \"fb75cd6f-fc27-410d-9166-6a02cdd578b3\") " Feb 18 01:55:00 crc kubenswrapper[4791]: I0218 01:55:00.256880 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fb75cd6f-fc27-410d-9166-6a02cdd578b3-utilities" (OuterVolumeSpecName: "utilities") pod "fb75cd6f-fc27-410d-9166-6a02cdd578b3" (UID: "fb75cd6f-fc27-410d-9166-6a02cdd578b3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:55:00 crc kubenswrapper[4791]: I0218 01:55:00.265415 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb75cd6f-fc27-410d-9166-6a02cdd578b3-kube-api-access-w7zqq" (OuterVolumeSpecName: "kube-api-access-w7zqq") pod "fb75cd6f-fc27-410d-9166-6a02cdd578b3" (UID: "fb75cd6f-fc27-410d-9166-6a02cdd578b3"). InnerVolumeSpecName "kube-api-access-w7zqq". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 01:55:00 crc kubenswrapper[4791]: I0218 01:55:00.281443 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fb75cd6f-fc27-410d-9166-6a02cdd578b3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fb75cd6f-fc27-410d-9166-6a02cdd578b3" (UID: "fb75cd6f-fc27-410d-9166-6a02cdd578b3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 01:55:00 crc kubenswrapper[4791]: I0218 01:55:00.359192 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb75cd6f-fc27-410d-9166-6a02cdd578b3-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 01:55:00 crc kubenswrapper[4791]: I0218 01:55:00.359228 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb75cd6f-fc27-410d-9166-6a02cdd578b3-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 01:55:00 crc kubenswrapper[4791]: I0218 01:55:00.359240 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7zqq\" (UniqueName: \"kubernetes.io/projected/fb75cd6f-fc27-410d-9166-6a02cdd578b3-kube-api-access-w7zqq\") on node \"crc\" DevicePath \"\"" Feb 18 01:55:00 crc kubenswrapper[4791]: I0218 01:55:00.590898 4791 generic.go:334] "Generic (PLEG): container finished" podID="fb75cd6f-fc27-410d-9166-6a02cdd578b3" containerID="b7c378ef17f6be03fc0d929c780ffd1e2b6d3d2ca83288a0d9a361f78d7a0f8c" exitCode=0 Feb 18 01:55:00 crc kubenswrapper[4791]: I0218 01:55:00.590944 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-clg4f" event={"ID":"fb75cd6f-fc27-410d-9166-6a02cdd578b3","Type":"ContainerDied","Data":"b7c378ef17f6be03fc0d929c780ffd1e2b6d3d2ca83288a0d9a361f78d7a0f8c"} Feb 18 01:55:00 crc kubenswrapper[4791]: I0218 01:55:00.590970 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-clg4f" event={"ID":"fb75cd6f-fc27-410d-9166-6a02cdd578b3","Type":"ContainerDied","Data":"661071a2ea64524f65e841c94d026c3ff8a9b005c7ca0703148e27049340e600"} Feb 18 01:55:00 crc kubenswrapper[4791]: I0218 01:55:00.590986 4791 scope.go:117] "RemoveContainer" containerID="b7c378ef17f6be03fc0d929c780ffd1e2b6d3d2ca83288a0d9a361f78d7a0f8c" Feb 18 01:55:00 crc kubenswrapper[4791]: I0218 01:55:00.591013 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-clg4f" Feb 18 01:55:00 crc kubenswrapper[4791]: I0218 01:55:00.621063 4791 scope.go:117] "RemoveContainer" containerID="c9d125d1071a8ab86419e1ea8932b148351729fc090e230e5cfe42f234457b8e" Feb 18 01:55:00 crc kubenswrapper[4791]: I0218 01:55:00.627600 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-clg4f"] Feb 18 01:55:00 crc kubenswrapper[4791]: I0218 01:55:00.654666 4791 scope.go:117] "RemoveContainer" containerID="af29669a78162f6b65e202cc1b7d1a098063b11bfc71af043f98b775a46aa221" Feb 18 01:55:00 crc kubenswrapper[4791]: I0218 01:55:00.660893 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-clg4f"] Feb 18 01:55:00 crc kubenswrapper[4791]: I0218 01:55:00.706467 4791 scope.go:117] "RemoveContainer" containerID="b7c378ef17f6be03fc0d929c780ffd1e2b6d3d2ca83288a0d9a361f78d7a0f8c" Feb 18 01:55:00 crc kubenswrapper[4791]: E0218 01:55:00.706961 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b7c378ef17f6be03fc0d929c780ffd1e2b6d3d2ca83288a0d9a361f78d7a0f8c\": container with ID starting with b7c378ef17f6be03fc0d929c780ffd1e2b6d3d2ca83288a0d9a361f78d7a0f8c not found: ID does not exist" containerID="b7c378ef17f6be03fc0d929c780ffd1e2b6d3d2ca83288a0d9a361f78d7a0f8c" Feb 18 01:55:00 crc kubenswrapper[4791]: I0218 01:55:00.707072 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7c378ef17f6be03fc0d929c780ffd1e2b6d3d2ca83288a0d9a361f78d7a0f8c"} err="failed to get container status \"b7c378ef17f6be03fc0d929c780ffd1e2b6d3d2ca83288a0d9a361f78d7a0f8c\": rpc error: code = NotFound desc = could not find container \"b7c378ef17f6be03fc0d929c780ffd1e2b6d3d2ca83288a0d9a361f78d7a0f8c\": container with ID starting with b7c378ef17f6be03fc0d929c780ffd1e2b6d3d2ca83288a0d9a361f78d7a0f8c not found: ID does not exist" Feb 18 01:55:00 crc kubenswrapper[4791]: I0218 01:55:00.707095 4791 scope.go:117] "RemoveContainer" containerID="c9d125d1071a8ab86419e1ea8932b148351729fc090e230e5cfe42f234457b8e" Feb 18 01:55:00 crc kubenswrapper[4791]: E0218 01:55:00.707420 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c9d125d1071a8ab86419e1ea8932b148351729fc090e230e5cfe42f234457b8e\": container with ID starting with c9d125d1071a8ab86419e1ea8932b148351729fc090e230e5cfe42f234457b8e not found: ID does not exist" containerID="c9d125d1071a8ab86419e1ea8932b148351729fc090e230e5cfe42f234457b8e" Feb 18 01:55:00 crc kubenswrapper[4791]: I0218 01:55:00.707472 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9d125d1071a8ab86419e1ea8932b148351729fc090e230e5cfe42f234457b8e"} err="failed to get container status \"c9d125d1071a8ab86419e1ea8932b148351729fc090e230e5cfe42f234457b8e\": rpc error: code = NotFound desc = could not find container \"c9d125d1071a8ab86419e1ea8932b148351729fc090e230e5cfe42f234457b8e\": container with ID starting with c9d125d1071a8ab86419e1ea8932b148351729fc090e230e5cfe42f234457b8e not found: ID does not exist" Feb 18 01:55:00 crc kubenswrapper[4791]: I0218 01:55:00.707504 4791 scope.go:117] "RemoveContainer" containerID="af29669a78162f6b65e202cc1b7d1a098063b11bfc71af043f98b775a46aa221" Feb 18 01:55:00 crc kubenswrapper[4791]: E0218 01:55:00.707876 4791 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"af29669a78162f6b65e202cc1b7d1a098063b11bfc71af043f98b775a46aa221\": container with ID starting with af29669a78162f6b65e202cc1b7d1a098063b11bfc71af043f98b775a46aa221 not found: ID does not exist" containerID="af29669a78162f6b65e202cc1b7d1a098063b11bfc71af043f98b775a46aa221" Feb 18 01:55:00 crc kubenswrapper[4791]: I0218 01:55:00.707919 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"af29669a78162f6b65e202cc1b7d1a098063b11bfc71af043f98b775a46aa221"} err="failed to get container status \"af29669a78162f6b65e202cc1b7d1a098063b11bfc71af043f98b775a46aa221\": rpc error: code = NotFound desc = could not find container \"af29669a78162f6b65e202cc1b7d1a098063b11bfc71af043f98b775a46aa221\": container with ID starting with af29669a78162f6b65e202cc1b7d1a098063b11bfc71af043f98b775a46aa221 not found: ID does not exist" Feb 18 01:55:01 crc kubenswrapper[4791]: E0218 01:55:01.064516 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:55:01 crc kubenswrapper[4791]: I0218 01:55:01.073927 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb75cd6f-fc27-410d-9166-6a02cdd578b3" path="/var/lib/kubelet/pods/fb75cd6f-fc27-410d-9166-6a02cdd578b3/volumes" Feb 18 01:55:07 crc kubenswrapper[4791]: I0218 01:55:07.062515 4791 scope.go:117] "RemoveContainer" containerID="013bfa5afe6249afde24459696ad10cac9297eef278d7d81abf4388dec91ac8a" Feb 18 01:55:07 crc kubenswrapper[4791]: E0218 01:55:07.063828 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:55:13 crc kubenswrapper[4791]: E0218 01:55:13.063285 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:55:13 crc kubenswrapper[4791]: E0218 01:55:13.063471 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:55:21 crc kubenswrapper[4791]: I0218 01:55:21.062218 4791 scope.go:117] "RemoveContainer" containerID="013bfa5afe6249afde24459696ad10cac9297eef278d7d81abf4388dec91ac8a" Feb 18 01:55:21 crc kubenswrapper[4791]: E0218 01:55:21.063179 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 01:55:26 crc kubenswrapper[4791]: E0218 01:55:26.064589 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:55:27 crc kubenswrapper[4791]: E0218 01:55:27.064411 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:55:32 crc kubenswrapper[4791]: I0218 01:55:32.062253 4791 scope.go:117] "RemoveContainer" containerID="013bfa5afe6249afde24459696ad10cac9297eef278d7d81abf4388dec91ac8a" Feb 18 01:55:32 crc kubenswrapper[4791]: I0218 01:55:32.967236 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerStarted","Data":"f344a350fe300a77dd4db872e88658f1996dd926c3d0ada34243c9da47ff930a"} Feb 18 01:55:40 crc kubenswrapper[4791]: E0218 01:55:40.065184 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:55:41 crc kubenswrapper[4791]: E0218 01:55:41.064363 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:55:52 crc kubenswrapper[4791]: E0218 01:55:52.062883 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:55:54 crc kubenswrapper[4791]: E0218 01:55:54.064240 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:56:05 crc kubenswrapper[4791]: E0218 01:56:05.064434 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 
18 01:56:06 crc kubenswrapper[4791]: E0218 01:56:06.064713 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:56:18 crc kubenswrapper[4791]: E0218 01:56:18.064570 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:56:19 crc kubenswrapper[4791]: E0218 01:56:19.099431 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:56:30 crc kubenswrapper[4791]: E0218 01:56:30.062813 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:56:33 crc kubenswrapper[4791]: E0218 01:56:33.062673 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:56:44 crc kubenswrapper[4791]: E0218 01:56:44.066139 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:56:44 crc kubenswrapper[4791]: E0218 01:56:44.066139 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:56:56 crc kubenswrapper[4791]: E0218 01:56:56.063932 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:56:56 crc kubenswrapper[4791]: E0218 01:56:56.064414 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" 
pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:57:08 crc kubenswrapper[4791]: E0218 01:57:08.066978 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:57:10 crc kubenswrapper[4791]: E0218 01:57:10.065087 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:57:21 crc kubenswrapper[4791]: E0218 01:57:21.064047 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:57:23 crc kubenswrapper[4791]: E0218 01:57:23.064289 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:57:36 crc kubenswrapper[4791]: E0218 01:57:36.065631 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:57:37 crc kubenswrapper[4791]: E0218 01:57:37.063007 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:57:48 crc kubenswrapper[4791]: I0218 01:57:48.066732 4791 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 18 01:57:48 crc kubenswrapper[4791]: E0218 01:57:48.067127 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:57:48 crc kubenswrapper[4791]: E0218 01:57:48.196344 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 01:57:48 crc kubenswrapper[4791]: E0218 01:57:48.196591 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 01:57:48 crc kubenswrapper[4791]: E0218 01:57:48.196701 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lgn7d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-scphk_openstack(68e5e8d6-5771-4045-858a-4a39b2db99f9): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Feb 18 01:57:48 crc kubenswrapper[4791]: E0218 01:57:48.197793 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:57:56 crc kubenswrapper[4791]: I0218 01:57:56.799773 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 01:57:56 crc kubenswrapper[4791]: I0218 01:57:56.800578 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 01:58:00 crc kubenswrapper[4791]: E0218 01:58:00.063204 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:58:01 crc kubenswrapper[4791]: E0218 01:58:01.064539 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:58:14 crc kubenswrapper[4791]: E0218 01:58:14.064972 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:58:14 crc kubenswrapper[4791]: E0218 01:58:14.065501 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:58:26 crc kubenswrapper[4791]: E0218 01:58:26.064555 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:58:26 crc kubenswrapper[4791]: I0218 01:58:26.799959 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv 
container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 01:58:26 crc kubenswrapper[4791]: I0218 01:58:26.800363 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 01:58:27 crc kubenswrapper[4791]: E0218 01:58:27.190775 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 01:58:27 crc kubenswrapper[4791]: E0218 01:58:27.190852 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 01:58:27 crc kubenswrapper[4791]: E0218 01:58:27.191069 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndh699h566hcch79h59h595h58bhb6h7fh594h67fh546h668h56hc8h6chd5h94h599h587hddh64bh57bhc9h68fh7dh57bh65dh64fh68dh59dq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-np7gr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(4b9cec47-aeda-40f0-b83e-46f09ce65e95): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 18 01:58:27 crc kubenswrapper[4791]: E0218 01:58:27.192276 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:58:39 crc kubenswrapper[4791]: E0218 01:58:39.075723 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:58:41 crc kubenswrapper[4791]: E0218 01:58:41.063762 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:58:52 crc kubenswrapper[4791]: E0218 01:58:52.064079 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:58:54 crc kubenswrapper[4791]: E0218 01:58:54.062906 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:58:56 crc kubenswrapper[4791]: I0218 01:58:56.799834 4791 patch_prober.go:28] 
interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 01:58:56 crc kubenswrapper[4791]: I0218 01:58:56.800509 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 01:58:56 crc kubenswrapper[4791]: I0218 01:58:56.800741 4791 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" Feb 18 01:58:56 crc kubenswrapper[4791]: I0218 01:58:56.801626 4791 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f344a350fe300a77dd4db872e88658f1996dd926c3d0ada34243c9da47ff930a"} pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 18 01:58:56 crc kubenswrapper[4791]: I0218 01:58:56.801697 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" containerID="cri-o://f344a350fe300a77dd4db872e88658f1996dd926c3d0ada34243c9da47ff930a" gracePeriod=600 Feb 18 01:58:57 crc kubenswrapper[4791]: I0218 01:58:57.299200 4791 generic.go:334] "Generic (PLEG): container finished" podID="0b31a333-8f95-459c-8135-e91e557c4c85" containerID="f344a350fe300a77dd4db872e88658f1996dd926c3d0ada34243c9da47ff930a" exitCode=0 Feb 18 01:58:57 crc kubenswrapper[4791]: I0218 01:58:57.299268 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerDied","Data":"f344a350fe300a77dd4db872e88658f1996dd926c3d0ada34243c9da47ff930a"} Feb 18 01:58:57 crc kubenswrapper[4791]: I0218 01:58:57.299547 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerStarted","Data":"c910b6cc87a9dcc9eddfba49c0ce2c47da01bea9873b2e6847f3f6b997395cc8"} Feb 18 01:58:57 crc kubenswrapper[4791]: I0218 01:58:57.299578 4791 scope.go:117] "RemoveContainer" containerID="013bfa5afe6249afde24459696ad10cac9297eef278d7d81abf4388dec91ac8a" Feb 18 01:59:03 crc kubenswrapper[4791]: E0218 01:59:03.080496 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:59:05 crc kubenswrapper[4791]: E0218 01:59:05.064441 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" 
pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:59:16 crc kubenswrapper[4791]: E0218 01:59:16.063873 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:59:20 crc kubenswrapper[4791]: E0218 01:59:20.065129 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:59:31 crc kubenswrapper[4791]: E0218 01:59:31.064635 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:59:34 crc kubenswrapper[4791]: E0218 01:59:34.063653 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:59:43 crc kubenswrapper[4791]: E0218 01:59:43.063457 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:59:45 crc kubenswrapper[4791]: E0218 01:59:45.063838 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 01:59:54 crc kubenswrapper[4791]: E0218 01:59:54.063079 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 01:59:57 crc kubenswrapper[4791]: E0218 01:59:57.064028 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:00:00 crc kubenswrapper[4791]: I0218 02:00:00.030928 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-84mqx"] Feb 18 02:00:00 crc kubenswrapper[4791]: E0218 02:00:00.031854 4791 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="fb75cd6f-fc27-410d-9166-6a02cdd578b3" containerName="extract-content" Feb 18 02:00:00 crc kubenswrapper[4791]: I0218 02:00:00.031872 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb75cd6f-fc27-410d-9166-6a02cdd578b3" containerName="extract-content" Feb 18 02:00:00 crc kubenswrapper[4791]: E0218 02:00:00.031905 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb75cd6f-fc27-410d-9166-6a02cdd578b3" containerName="registry-server" Feb 18 02:00:00 crc kubenswrapper[4791]: I0218 02:00:00.031914 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb75cd6f-fc27-410d-9166-6a02cdd578b3" containerName="registry-server" Feb 18 02:00:00 crc kubenswrapper[4791]: E0218 02:00:00.031939 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb75cd6f-fc27-410d-9166-6a02cdd578b3" containerName="extract-utilities" Feb 18 02:00:00 crc kubenswrapper[4791]: I0218 02:00:00.031948 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb75cd6f-fc27-410d-9166-6a02cdd578b3" containerName="extract-utilities" Feb 18 02:00:00 crc kubenswrapper[4791]: I0218 02:00:00.032303 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb75cd6f-fc27-410d-9166-6a02cdd578b3" containerName="registry-server" Feb 18 02:00:00 crc kubenswrapper[4791]: I0218 02:00:00.034575 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-84mqx" Feb 18 02:00:00 crc kubenswrapper[4791]: I0218 02:00:00.041532 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-84mqx"] Feb 18 02:00:00 crc kubenswrapper[4791]: I0218 02:00:00.144549 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d32c7a7c-f951-47ef-b4bd-ede6f428f5ef-catalog-content\") pod \"certified-operators-84mqx\" (UID: \"d32c7a7c-f951-47ef-b4bd-ede6f428f5ef\") " pod="openshift-marketplace/certified-operators-84mqx" Feb 18 02:00:00 crc kubenswrapper[4791]: I0218 02:00:00.144733 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vqlmk\" (UniqueName: \"kubernetes.io/projected/d32c7a7c-f951-47ef-b4bd-ede6f428f5ef-kube-api-access-vqlmk\") pod \"certified-operators-84mqx\" (UID: \"d32c7a7c-f951-47ef-b4bd-ede6f428f5ef\") " pod="openshift-marketplace/certified-operators-84mqx" Feb 18 02:00:00 crc kubenswrapper[4791]: I0218 02:00:00.144768 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d32c7a7c-f951-47ef-b4bd-ede6f428f5ef-utilities\") pod \"certified-operators-84mqx\" (UID: \"d32c7a7c-f951-47ef-b4bd-ede6f428f5ef\") " pod="openshift-marketplace/certified-operators-84mqx" Feb 18 02:00:00 crc kubenswrapper[4791]: I0218 02:00:00.152264 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29523000-nxtdv"] Feb 18 02:00:00 crc kubenswrapper[4791]: I0218 02:00:00.153952 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29523000-nxtdv" Feb 18 02:00:00 crc kubenswrapper[4791]: I0218 02:00:00.157401 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Feb 18 02:00:00 crc kubenswrapper[4791]: I0218 02:00:00.157672 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Feb 18 02:00:00 crc kubenswrapper[4791]: I0218 02:00:00.165916 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29523000-nxtdv"] Feb 18 02:00:00 crc kubenswrapper[4791]: I0218 02:00:00.246853 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vqlmk\" (UniqueName: \"kubernetes.io/projected/d32c7a7c-f951-47ef-b4bd-ede6f428f5ef-kube-api-access-vqlmk\") pod \"certified-operators-84mqx\" (UID: \"d32c7a7c-f951-47ef-b4bd-ede6f428f5ef\") " pod="openshift-marketplace/certified-operators-84mqx" Feb 18 02:00:00 crc kubenswrapper[4791]: I0218 02:00:00.246919 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d32c7a7c-f951-47ef-b4bd-ede6f428f5ef-utilities\") pod \"certified-operators-84mqx\" (UID: \"d32c7a7c-f951-47ef-b4bd-ede6f428f5ef\") " pod="openshift-marketplace/certified-operators-84mqx" Feb 18 02:00:00 crc kubenswrapper[4791]: I0218 02:00:00.247006 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3629d197-c7eb-4a80-9d25-38d7f0c4dc65-secret-volume\") pod \"collect-profiles-29523000-nxtdv\" (UID: \"3629d197-c7eb-4a80-9d25-38d7f0c4dc65\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29523000-nxtdv" Feb 18 02:00:00 crc kubenswrapper[4791]: I0218 02:00:00.247347 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d32c7a7c-f951-47ef-b4bd-ede6f428f5ef-utilities\") pod \"certified-operators-84mqx\" (UID: \"d32c7a7c-f951-47ef-b4bd-ede6f428f5ef\") " pod="openshift-marketplace/certified-operators-84mqx" Feb 18 02:00:00 crc kubenswrapper[4791]: I0218 02:00:00.247485 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d32c7a7c-f951-47ef-b4bd-ede6f428f5ef-catalog-content\") pod \"certified-operators-84mqx\" (UID: \"d32c7a7c-f951-47ef-b4bd-ede6f428f5ef\") " pod="openshift-marketplace/certified-operators-84mqx" Feb 18 02:00:00 crc kubenswrapper[4791]: I0218 02:00:00.247143 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d32c7a7c-f951-47ef-b4bd-ede6f428f5ef-catalog-content\") pod \"certified-operators-84mqx\" (UID: \"d32c7a7c-f951-47ef-b4bd-ede6f428f5ef\") " pod="openshift-marketplace/certified-operators-84mqx" Feb 18 02:00:00 crc kubenswrapper[4791]: I0218 02:00:00.247563 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3629d197-c7eb-4a80-9d25-38d7f0c4dc65-config-volume\") pod \"collect-profiles-29523000-nxtdv\" (UID: \"3629d197-c7eb-4a80-9d25-38d7f0c4dc65\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29523000-nxtdv" Feb 18 02:00:00 crc 
kubenswrapper[4791]: I0218 02:00:00.247754 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jw8xq\" (UniqueName: \"kubernetes.io/projected/3629d197-c7eb-4a80-9d25-38d7f0c4dc65-kube-api-access-jw8xq\") pod \"collect-profiles-29523000-nxtdv\" (UID: \"3629d197-c7eb-4a80-9d25-38d7f0c4dc65\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29523000-nxtdv" Feb 18 02:00:00 crc kubenswrapper[4791]: I0218 02:00:00.273208 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vqlmk\" (UniqueName: \"kubernetes.io/projected/d32c7a7c-f951-47ef-b4bd-ede6f428f5ef-kube-api-access-vqlmk\") pod \"certified-operators-84mqx\" (UID: \"d32c7a7c-f951-47ef-b4bd-ede6f428f5ef\") " pod="openshift-marketplace/certified-operators-84mqx" Feb 18 02:00:00 crc kubenswrapper[4791]: I0218 02:00:00.350006 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jw8xq\" (UniqueName: \"kubernetes.io/projected/3629d197-c7eb-4a80-9d25-38d7f0c4dc65-kube-api-access-jw8xq\") pod \"collect-profiles-29523000-nxtdv\" (UID: \"3629d197-c7eb-4a80-9d25-38d7f0c4dc65\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29523000-nxtdv" Feb 18 02:00:00 crc kubenswrapper[4791]: I0218 02:00:00.350696 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3629d197-c7eb-4a80-9d25-38d7f0c4dc65-secret-volume\") pod \"collect-profiles-29523000-nxtdv\" (UID: \"3629d197-c7eb-4a80-9d25-38d7f0c4dc65\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29523000-nxtdv" Feb 18 02:00:00 crc kubenswrapper[4791]: I0218 02:00:00.350848 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3629d197-c7eb-4a80-9d25-38d7f0c4dc65-config-volume\") pod \"collect-profiles-29523000-nxtdv\" (UID: \"3629d197-c7eb-4a80-9d25-38d7f0c4dc65\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29523000-nxtdv" Feb 18 02:00:00 crc kubenswrapper[4791]: I0218 02:00:00.351833 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3629d197-c7eb-4a80-9d25-38d7f0c4dc65-config-volume\") pod \"collect-profiles-29523000-nxtdv\" (UID: \"3629d197-c7eb-4a80-9d25-38d7f0c4dc65\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29523000-nxtdv" Feb 18 02:00:00 crc kubenswrapper[4791]: I0218 02:00:00.360832 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3629d197-c7eb-4a80-9d25-38d7f0c4dc65-secret-volume\") pod \"collect-profiles-29523000-nxtdv\" (UID: \"3629d197-c7eb-4a80-9d25-38d7f0c4dc65\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29523000-nxtdv" Feb 18 02:00:00 crc kubenswrapper[4791]: I0218 02:00:00.371688 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jw8xq\" (UniqueName: \"kubernetes.io/projected/3629d197-c7eb-4a80-9d25-38d7f0c4dc65-kube-api-access-jw8xq\") pod \"collect-profiles-29523000-nxtdv\" (UID: \"3629d197-c7eb-4a80-9d25-38d7f0c4dc65\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29523000-nxtdv" Feb 18 02:00:00 crc kubenswrapper[4791]: I0218 02:00:00.398577 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-84mqx" Feb 18 02:00:00 crc kubenswrapper[4791]: I0218 02:00:00.492716 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29523000-nxtdv" Feb 18 02:00:00 crc kubenswrapper[4791]: I0218 02:00:00.961528 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-84mqx"] Feb 18 02:00:01 crc kubenswrapper[4791]: I0218 02:00:01.097172 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-84mqx" event={"ID":"d32c7a7c-f951-47ef-b4bd-ede6f428f5ef","Type":"ContainerStarted","Data":"ff11a5c988417e4fb2c3fdf7f0e7b3151390f0820d3c299c799f134c8e73bcd0"} Feb 18 02:00:01 crc kubenswrapper[4791]: W0218 02:00:01.110767 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3629d197_c7eb_4a80_9d25_38d7f0c4dc65.slice/crio-d0a52ffc037a02446ec127c338cc2ef0c394e555a700455e558f8fb6c546be8a WatchSource:0}: Error finding container d0a52ffc037a02446ec127c338cc2ef0c394e555a700455e558f8fb6c546be8a: Status 404 returned error can't find the container with id d0a52ffc037a02446ec127c338cc2ef0c394e555a700455e558f8fb6c546be8a Feb 18 02:00:01 crc kubenswrapper[4791]: I0218 02:00:01.115469 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29523000-nxtdv"] Feb 18 02:00:02 crc kubenswrapper[4791]: I0218 02:00:02.118097 4791 generic.go:334] "Generic (PLEG): container finished" podID="3629d197-c7eb-4a80-9d25-38d7f0c4dc65" containerID="f585711cb848e334147b5e740710a67178d0e064f248467de39fa5b4d99c4618" exitCode=0 Feb 18 02:00:02 crc kubenswrapper[4791]: I0218 02:00:02.118397 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29523000-nxtdv" event={"ID":"3629d197-c7eb-4a80-9d25-38d7f0c4dc65","Type":"ContainerDied","Data":"f585711cb848e334147b5e740710a67178d0e064f248467de39fa5b4d99c4618"} Feb 18 02:00:02 crc kubenswrapper[4791]: I0218 02:00:02.118426 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29523000-nxtdv" event={"ID":"3629d197-c7eb-4a80-9d25-38d7f0c4dc65","Type":"ContainerStarted","Data":"d0a52ffc037a02446ec127c338cc2ef0c394e555a700455e558f8fb6c546be8a"} Feb 18 02:00:02 crc kubenswrapper[4791]: I0218 02:00:02.123380 4791 generic.go:334] "Generic (PLEG): container finished" podID="d32c7a7c-f951-47ef-b4bd-ede6f428f5ef" containerID="8a3fa71824b7c6c0900c955fe7cb29ca5516e3f96b4634f3a2f2067dd6688a1d" exitCode=0 Feb 18 02:00:02 crc kubenswrapper[4791]: I0218 02:00:02.123602 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-84mqx" event={"ID":"d32c7a7c-f951-47ef-b4bd-ede6f428f5ef","Type":"ContainerDied","Data":"8a3fa71824b7c6c0900c955fe7cb29ca5516e3f96b4634f3a2f2067dd6688a1d"} Feb 18 02:00:03 crc kubenswrapper[4791]: I0218 02:00:03.526368 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29523000-nxtdv" Feb 18 02:00:03 crc kubenswrapper[4791]: I0218 02:00:03.633263 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jw8xq\" (UniqueName: \"kubernetes.io/projected/3629d197-c7eb-4a80-9d25-38d7f0c4dc65-kube-api-access-jw8xq\") pod \"3629d197-c7eb-4a80-9d25-38d7f0c4dc65\" (UID: \"3629d197-c7eb-4a80-9d25-38d7f0c4dc65\") " Feb 18 02:00:03 crc kubenswrapper[4791]: I0218 02:00:03.633519 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3629d197-c7eb-4a80-9d25-38d7f0c4dc65-config-volume\") pod \"3629d197-c7eb-4a80-9d25-38d7f0c4dc65\" (UID: \"3629d197-c7eb-4a80-9d25-38d7f0c4dc65\") " Feb 18 02:00:03 crc kubenswrapper[4791]: I0218 02:00:03.633756 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3629d197-c7eb-4a80-9d25-38d7f0c4dc65-secret-volume\") pod \"3629d197-c7eb-4a80-9d25-38d7f0c4dc65\" (UID: \"3629d197-c7eb-4a80-9d25-38d7f0c4dc65\") " Feb 18 02:00:03 crc kubenswrapper[4791]: I0218 02:00:03.634226 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3629d197-c7eb-4a80-9d25-38d7f0c4dc65-config-volume" (OuterVolumeSpecName: "config-volume") pod "3629d197-c7eb-4a80-9d25-38d7f0c4dc65" (UID: "3629d197-c7eb-4a80-9d25-38d7f0c4dc65"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 18 02:00:03 crc kubenswrapper[4791]: I0218 02:00:03.634862 4791 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3629d197-c7eb-4a80-9d25-38d7f0c4dc65-config-volume\") on node \"crc\" DevicePath \"\"" Feb 18 02:00:03 crc kubenswrapper[4791]: I0218 02:00:03.638818 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3629d197-c7eb-4a80-9d25-38d7f0c4dc65-kube-api-access-jw8xq" (OuterVolumeSpecName: "kube-api-access-jw8xq") pod "3629d197-c7eb-4a80-9d25-38d7f0c4dc65" (UID: "3629d197-c7eb-4a80-9d25-38d7f0c4dc65"). InnerVolumeSpecName "kube-api-access-jw8xq". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 02:00:03 crc kubenswrapper[4791]: I0218 02:00:03.639317 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3629d197-c7eb-4a80-9d25-38d7f0c4dc65-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "3629d197-c7eb-4a80-9d25-38d7f0c4dc65" (UID: "3629d197-c7eb-4a80-9d25-38d7f0c4dc65"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 02:00:03 crc kubenswrapper[4791]: I0218 02:00:03.737488 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jw8xq\" (UniqueName: \"kubernetes.io/projected/3629d197-c7eb-4a80-9d25-38d7f0c4dc65-kube-api-access-jw8xq\") on node \"crc\" DevicePath \"\"" Feb 18 02:00:03 crc kubenswrapper[4791]: I0218 02:00:03.737522 4791 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3629d197-c7eb-4a80-9d25-38d7f0c4dc65-secret-volume\") on node \"crc\" DevicePath \"\"" Feb 18 02:00:04 crc kubenswrapper[4791]: I0218 02:00:04.144215 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29523000-nxtdv" event={"ID":"3629d197-c7eb-4a80-9d25-38d7f0c4dc65","Type":"ContainerDied","Data":"d0a52ffc037a02446ec127c338cc2ef0c394e555a700455e558f8fb6c546be8a"} Feb 18 02:00:04 crc kubenswrapper[4791]: I0218 02:00:04.144279 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d0a52ffc037a02446ec127c338cc2ef0c394e555a700455e558f8fb6c546be8a" Feb 18 02:00:04 crc kubenswrapper[4791]: I0218 02:00:04.144274 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29523000-nxtdv" Feb 18 02:00:04 crc kubenswrapper[4791]: I0218 02:00:04.609249 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29522955-hqzql"] Feb 18 02:00:04 crc kubenswrapper[4791]: I0218 02:00:04.620459 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29522955-hqzql"] Feb 18 02:00:05 crc kubenswrapper[4791]: I0218 02:00:05.076246 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6494cc9c-ba04-49f3-bd9a-11dba584dea7" path="/var/lib/kubelet/pods/6494cc9c-ba04-49f3-bd9a-11dba584dea7/volumes" Feb 18 02:00:06 crc kubenswrapper[4791]: E0218 02:00:06.065597 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:00:12 crc kubenswrapper[4791]: E0218 02:00:12.063275 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:00:21 crc kubenswrapper[4791]: E0218 02:00:21.064519 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:00:23 crc kubenswrapper[4791]: E0218 02:00:23.062666 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" 
pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:00:30 crc kubenswrapper[4791]: I0218 02:00:30.426744 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-84mqx" event={"ID":"d32c7a7c-f951-47ef-b4bd-ede6f428f5ef","Type":"ContainerStarted","Data":"e4909a1368c998fec884a6ec8b651827693734f3e322746e8c786f4e129f1936"} Feb 18 02:00:31 crc kubenswrapper[4791]: I0218 02:00:31.438488 4791 generic.go:334] "Generic (PLEG): container finished" podID="d32c7a7c-f951-47ef-b4bd-ede6f428f5ef" containerID="e4909a1368c998fec884a6ec8b651827693734f3e322746e8c786f4e129f1936" exitCode=0 Feb 18 02:00:31 crc kubenswrapper[4791]: I0218 02:00:31.438640 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-84mqx" event={"ID":"d32c7a7c-f951-47ef-b4bd-ede6f428f5ef","Type":"ContainerDied","Data":"e4909a1368c998fec884a6ec8b651827693734f3e322746e8c786f4e129f1936"} Feb 18 02:00:31 crc kubenswrapper[4791]: I0218 02:00:31.438927 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-84mqx" event={"ID":"d32c7a7c-f951-47ef-b4bd-ede6f428f5ef","Type":"ContainerStarted","Data":"cf49635f2eef3b43309562c71577aedff44aa567e8ac3bfdb166c7d51777e7bb"} Feb 18 02:00:31 crc kubenswrapper[4791]: I0218 02:00:31.461998 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-84mqx" podStartSLOduration=3.606804152 podStartE2EDuration="32.461979456s" podCreationTimestamp="2026-02-18 01:59:59 +0000 UTC" firstStartedPulling="2026-02-18 02:00:02.127320576 +0000 UTC m=+5143.695333746" lastFinishedPulling="2026-02-18 02:00:30.98249588 +0000 UTC m=+5172.550509050" observedRunningTime="2026-02-18 02:00:31.46013414 +0000 UTC m=+5173.028147330" watchObservedRunningTime="2026-02-18 02:00:31.461979456 +0000 UTC m=+5173.029992636" Feb 18 02:00:34 crc kubenswrapper[4791]: I0218 02:00:34.319016 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-nc6h7"] Feb 18 02:00:34 crc kubenswrapper[4791]: E0218 02:00:34.320027 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3629d197-c7eb-4a80-9d25-38d7f0c4dc65" containerName="collect-profiles" Feb 18 02:00:34 crc kubenswrapper[4791]: I0218 02:00:34.320040 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="3629d197-c7eb-4a80-9d25-38d7f0c4dc65" containerName="collect-profiles" Feb 18 02:00:34 crc kubenswrapper[4791]: I0218 02:00:34.324724 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="3629d197-c7eb-4a80-9d25-38d7f0c4dc65" containerName="collect-profiles" Feb 18 02:00:34 crc kubenswrapper[4791]: I0218 02:00:34.326993 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-nc6h7" Feb 18 02:00:34 crc kubenswrapper[4791]: I0218 02:00:34.336257 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nc6h7"] Feb 18 02:00:34 crc kubenswrapper[4791]: I0218 02:00:34.449593 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06e9861f-aebc-4aa5-bf77-8126aa73924e-catalog-content\") pod \"redhat-operators-nc6h7\" (UID: \"06e9861f-aebc-4aa5-bf77-8126aa73924e\") " pod="openshift-marketplace/redhat-operators-nc6h7" Feb 18 02:00:34 crc kubenswrapper[4791]: I0218 02:00:34.449734 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06e9861f-aebc-4aa5-bf77-8126aa73924e-utilities\") pod \"redhat-operators-nc6h7\" (UID: \"06e9861f-aebc-4aa5-bf77-8126aa73924e\") " pod="openshift-marketplace/redhat-operators-nc6h7" Feb 18 02:00:34 crc kubenswrapper[4791]: I0218 02:00:34.450011 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xw64m\" (UniqueName: \"kubernetes.io/projected/06e9861f-aebc-4aa5-bf77-8126aa73924e-kube-api-access-xw64m\") pod \"redhat-operators-nc6h7\" (UID: \"06e9861f-aebc-4aa5-bf77-8126aa73924e\") " pod="openshift-marketplace/redhat-operators-nc6h7" Feb 18 02:00:34 crc kubenswrapper[4791]: I0218 02:00:34.562666 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06e9861f-aebc-4aa5-bf77-8126aa73924e-catalog-content\") pod \"redhat-operators-nc6h7\" (UID: \"06e9861f-aebc-4aa5-bf77-8126aa73924e\") " pod="openshift-marketplace/redhat-operators-nc6h7" Feb 18 02:00:34 crc kubenswrapper[4791]: I0218 02:00:34.562813 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06e9861f-aebc-4aa5-bf77-8126aa73924e-utilities\") pod \"redhat-operators-nc6h7\" (UID: \"06e9861f-aebc-4aa5-bf77-8126aa73924e\") " pod="openshift-marketplace/redhat-operators-nc6h7" Feb 18 02:00:34 crc kubenswrapper[4791]: I0218 02:00:34.562946 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xw64m\" (UniqueName: \"kubernetes.io/projected/06e9861f-aebc-4aa5-bf77-8126aa73924e-kube-api-access-xw64m\") pod \"redhat-operators-nc6h7\" (UID: \"06e9861f-aebc-4aa5-bf77-8126aa73924e\") " pod="openshift-marketplace/redhat-operators-nc6h7" Feb 18 02:00:34 crc kubenswrapper[4791]: I0218 02:00:34.564948 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06e9861f-aebc-4aa5-bf77-8126aa73924e-catalog-content\") pod \"redhat-operators-nc6h7\" (UID: \"06e9861f-aebc-4aa5-bf77-8126aa73924e\") " pod="openshift-marketplace/redhat-operators-nc6h7" Feb 18 02:00:34 crc kubenswrapper[4791]: I0218 02:00:34.565389 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06e9861f-aebc-4aa5-bf77-8126aa73924e-utilities\") pod \"redhat-operators-nc6h7\" (UID: \"06e9861f-aebc-4aa5-bf77-8126aa73924e\") " pod="openshift-marketplace/redhat-operators-nc6h7" Feb 18 02:00:34 crc kubenswrapper[4791]: I0218 02:00:34.590009 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-xw64m\" (UniqueName: \"kubernetes.io/projected/06e9861f-aebc-4aa5-bf77-8126aa73924e-kube-api-access-xw64m\") pod \"redhat-operators-nc6h7\" (UID: \"06e9861f-aebc-4aa5-bf77-8126aa73924e\") " pod="openshift-marketplace/redhat-operators-nc6h7" Feb 18 02:00:34 crc kubenswrapper[4791]: I0218 02:00:34.656392 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nc6h7" Feb 18 02:00:34 crc kubenswrapper[4791]: I0218 02:00:34.962431 4791 scope.go:117] "RemoveContainer" containerID="ee848c3633205542201b3615c1e669532006a61f6dd003fa73cc7fc4971989ce" Feb 18 02:00:35 crc kubenswrapper[4791]: I0218 02:00:35.165405 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nc6h7"] Feb 18 02:00:35 crc kubenswrapper[4791]: W0218 02:00:35.165780 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod06e9861f_aebc_4aa5_bf77_8126aa73924e.slice/crio-6dac6a38d2e37481f6b7fe37aa42c6e9ec2bb8919a415600bc7b267065d25dee WatchSource:0}: Error finding container 6dac6a38d2e37481f6b7fe37aa42c6e9ec2bb8919a415600bc7b267065d25dee: Status 404 returned error can't find the container with id 6dac6a38d2e37481f6b7fe37aa42c6e9ec2bb8919a415600bc7b267065d25dee Feb 18 02:00:35 crc kubenswrapper[4791]: I0218 02:00:35.478555 4791 generic.go:334] "Generic (PLEG): container finished" podID="06e9861f-aebc-4aa5-bf77-8126aa73924e" containerID="e9812a5f321ab9ddd31e4a83b525cb0947c649178e25d89a3429c01a6b410bc0" exitCode=0 Feb 18 02:00:35 crc kubenswrapper[4791]: I0218 02:00:35.478618 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nc6h7" event={"ID":"06e9861f-aebc-4aa5-bf77-8126aa73924e","Type":"ContainerDied","Data":"e9812a5f321ab9ddd31e4a83b525cb0947c649178e25d89a3429c01a6b410bc0"} Feb 18 02:00:35 crc kubenswrapper[4791]: I0218 02:00:35.478876 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nc6h7" event={"ID":"06e9861f-aebc-4aa5-bf77-8126aa73924e","Type":"ContainerStarted","Data":"6dac6a38d2e37481f6b7fe37aa42c6e9ec2bb8919a415600bc7b267065d25dee"} Feb 18 02:00:36 crc kubenswrapper[4791]: E0218 02:00:36.063572 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:00:36 crc kubenswrapper[4791]: I0218 02:00:36.498398 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nc6h7" event={"ID":"06e9861f-aebc-4aa5-bf77-8126aa73924e","Type":"ContainerStarted","Data":"2ea384b028e95f80cb243082add4130e1c01b0ca035730a60f685495b21adae6"} Feb 18 02:00:38 crc kubenswrapper[4791]: E0218 02:00:38.064120 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:00:40 crc kubenswrapper[4791]: I0218 02:00:40.399306 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-84mqx" Feb 
18 02:00:40 crc kubenswrapper[4791]: I0218 02:00:40.399794 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-84mqx" Feb 18 02:00:40 crc kubenswrapper[4791]: I0218 02:00:40.836985 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-84mqx" Feb 18 02:00:40 crc kubenswrapper[4791]: I0218 02:00:40.887675 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-84mqx" Feb 18 02:00:41 crc kubenswrapper[4791]: I0218 02:00:41.085716 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-84mqx"] Feb 18 02:00:41 crc kubenswrapper[4791]: I0218 02:00:41.549467 4791 generic.go:334] "Generic (PLEG): container finished" podID="06e9861f-aebc-4aa5-bf77-8126aa73924e" containerID="2ea384b028e95f80cb243082add4130e1c01b0ca035730a60f685495b21adae6" exitCode=0 Feb 18 02:00:41 crc kubenswrapper[4791]: I0218 02:00:41.549515 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nc6h7" event={"ID":"06e9861f-aebc-4aa5-bf77-8126aa73924e","Type":"ContainerDied","Data":"2ea384b028e95f80cb243082add4130e1c01b0ca035730a60f685495b21adae6"} Feb 18 02:00:42 crc kubenswrapper[4791]: I0218 02:00:42.571821 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-84mqx" podUID="d32c7a7c-f951-47ef-b4bd-ede6f428f5ef" containerName="registry-server" containerID="cri-o://cf49635f2eef3b43309562c71577aedff44aa567e8ac3bfdb166c7d51777e7bb" gracePeriod=2 Feb 18 02:00:42 crc kubenswrapper[4791]: I0218 02:00:42.572345 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nc6h7" event={"ID":"06e9861f-aebc-4aa5-bf77-8126aa73924e","Type":"ContainerStarted","Data":"cc2507a0af176a191c5b12eaea4e789a2ce954aa86d849b92a56dbf3ce83d6a0"} Feb 18 02:00:42 crc kubenswrapper[4791]: I0218 02:00:42.602830 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-nc6h7" podStartSLOduration=2.091247109 podStartE2EDuration="8.602807832s" podCreationTimestamp="2026-02-18 02:00:34 +0000 UTC" firstStartedPulling="2026-02-18 02:00:35.511642618 +0000 UTC m=+5177.079655788" lastFinishedPulling="2026-02-18 02:00:42.023203321 +0000 UTC m=+5183.591216511" observedRunningTime="2026-02-18 02:00:42.595380794 +0000 UTC m=+5184.163393974" watchObservedRunningTime="2026-02-18 02:00:42.602807832 +0000 UTC m=+5184.170821002" Feb 18 02:00:43 crc kubenswrapper[4791]: I0218 02:00:43.195307 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-84mqx" Feb 18 02:00:43 crc kubenswrapper[4791]: I0218 02:00:43.212962 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vqlmk\" (UniqueName: \"kubernetes.io/projected/d32c7a7c-f951-47ef-b4bd-ede6f428f5ef-kube-api-access-vqlmk\") pod \"d32c7a7c-f951-47ef-b4bd-ede6f428f5ef\" (UID: \"d32c7a7c-f951-47ef-b4bd-ede6f428f5ef\") " Feb 18 02:00:43 crc kubenswrapper[4791]: I0218 02:00:43.213043 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d32c7a7c-f951-47ef-b4bd-ede6f428f5ef-utilities\") pod \"d32c7a7c-f951-47ef-b4bd-ede6f428f5ef\" (UID: \"d32c7a7c-f951-47ef-b4bd-ede6f428f5ef\") " Feb 18 02:00:43 crc kubenswrapper[4791]: I0218 02:00:43.213374 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d32c7a7c-f951-47ef-b4bd-ede6f428f5ef-catalog-content\") pod \"d32c7a7c-f951-47ef-b4bd-ede6f428f5ef\" (UID: \"d32c7a7c-f951-47ef-b4bd-ede6f428f5ef\") " Feb 18 02:00:43 crc kubenswrapper[4791]: I0218 02:00:43.214773 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d32c7a7c-f951-47ef-b4bd-ede6f428f5ef-utilities" (OuterVolumeSpecName: "utilities") pod "d32c7a7c-f951-47ef-b4bd-ede6f428f5ef" (UID: "d32c7a7c-f951-47ef-b4bd-ede6f428f5ef"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 02:00:43 crc kubenswrapper[4791]: I0218 02:00:43.223931 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d32c7a7c-f951-47ef-b4bd-ede6f428f5ef-kube-api-access-vqlmk" (OuterVolumeSpecName: "kube-api-access-vqlmk") pod "d32c7a7c-f951-47ef-b4bd-ede6f428f5ef" (UID: "d32c7a7c-f951-47ef-b4bd-ede6f428f5ef"). InnerVolumeSpecName "kube-api-access-vqlmk". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 02:00:43 crc kubenswrapper[4791]: I0218 02:00:43.272210 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d32c7a7c-f951-47ef-b4bd-ede6f428f5ef-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d32c7a7c-f951-47ef-b4bd-ede6f428f5ef" (UID: "d32c7a7c-f951-47ef-b4bd-ede6f428f5ef"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 02:00:43 crc kubenswrapper[4791]: I0218 02:00:43.316503 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vqlmk\" (UniqueName: \"kubernetes.io/projected/d32c7a7c-f951-47ef-b4bd-ede6f428f5ef-kube-api-access-vqlmk\") on node \"crc\" DevicePath \"\"" Feb 18 02:00:43 crc kubenswrapper[4791]: I0218 02:00:43.317126 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d32c7a7c-f951-47ef-b4bd-ede6f428f5ef-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 02:00:43 crc kubenswrapper[4791]: I0218 02:00:43.317240 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d32c7a7c-f951-47ef-b4bd-ede6f428f5ef-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 02:00:43 crc kubenswrapper[4791]: I0218 02:00:43.584841 4791 generic.go:334] "Generic (PLEG): container finished" podID="d32c7a7c-f951-47ef-b4bd-ede6f428f5ef" containerID="cf49635f2eef3b43309562c71577aedff44aa567e8ac3bfdb166c7d51777e7bb" exitCode=0 Feb 18 02:00:43 crc kubenswrapper[4791]: I0218 02:00:43.584910 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-84mqx" Feb 18 02:00:43 crc kubenswrapper[4791]: I0218 02:00:43.586017 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-84mqx" event={"ID":"d32c7a7c-f951-47ef-b4bd-ede6f428f5ef","Type":"ContainerDied","Data":"cf49635f2eef3b43309562c71577aedff44aa567e8ac3bfdb166c7d51777e7bb"} Feb 18 02:00:43 crc kubenswrapper[4791]: I0218 02:00:43.586137 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-84mqx" event={"ID":"d32c7a7c-f951-47ef-b4bd-ede6f428f5ef","Type":"ContainerDied","Data":"ff11a5c988417e4fb2c3fdf7f0e7b3151390f0820d3c299c799f134c8e73bcd0"} Feb 18 02:00:43 crc kubenswrapper[4791]: I0218 02:00:43.586221 4791 scope.go:117] "RemoveContainer" containerID="cf49635f2eef3b43309562c71577aedff44aa567e8ac3bfdb166c7d51777e7bb" Feb 18 02:00:43 crc kubenswrapper[4791]: I0218 02:00:43.621734 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-84mqx"] Feb 18 02:00:43 crc kubenswrapper[4791]: I0218 02:00:43.621818 4791 scope.go:117] "RemoveContainer" containerID="e4909a1368c998fec884a6ec8b651827693734f3e322746e8c786f4e129f1936" Feb 18 02:00:43 crc kubenswrapper[4791]: I0218 02:00:43.634253 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-84mqx"] Feb 18 02:00:43 crc kubenswrapper[4791]: I0218 02:00:43.648052 4791 scope.go:117] "RemoveContainer" containerID="8a3fa71824b7c6c0900c955fe7cb29ca5516e3f96b4634f3a2f2067dd6688a1d" Feb 18 02:00:43 crc kubenswrapper[4791]: I0218 02:00:43.706433 4791 scope.go:117] "RemoveContainer" containerID="cf49635f2eef3b43309562c71577aedff44aa567e8ac3bfdb166c7d51777e7bb" Feb 18 02:00:43 crc kubenswrapper[4791]: E0218 02:00:43.707415 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cf49635f2eef3b43309562c71577aedff44aa567e8ac3bfdb166c7d51777e7bb\": container with ID starting with cf49635f2eef3b43309562c71577aedff44aa567e8ac3bfdb166c7d51777e7bb not found: ID does not exist" containerID="cf49635f2eef3b43309562c71577aedff44aa567e8ac3bfdb166c7d51777e7bb" Feb 18 02:00:43 crc kubenswrapper[4791]: I0218 02:00:43.707528 
4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf49635f2eef3b43309562c71577aedff44aa567e8ac3bfdb166c7d51777e7bb"} err="failed to get container status \"cf49635f2eef3b43309562c71577aedff44aa567e8ac3bfdb166c7d51777e7bb\": rpc error: code = NotFound desc = could not find container \"cf49635f2eef3b43309562c71577aedff44aa567e8ac3bfdb166c7d51777e7bb\": container with ID starting with cf49635f2eef3b43309562c71577aedff44aa567e8ac3bfdb166c7d51777e7bb not found: ID does not exist" Feb 18 02:00:43 crc kubenswrapper[4791]: I0218 02:00:43.707579 4791 scope.go:117] "RemoveContainer" containerID="e4909a1368c998fec884a6ec8b651827693734f3e322746e8c786f4e129f1936" Feb 18 02:00:43 crc kubenswrapper[4791]: E0218 02:00:43.707885 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e4909a1368c998fec884a6ec8b651827693734f3e322746e8c786f4e129f1936\": container with ID starting with e4909a1368c998fec884a6ec8b651827693734f3e322746e8c786f4e129f1936 not found: ID does not exist" containerID="e4909a1368c998fec884a6ec8b651827693734f3e322746e8c786f4e129f1936" Feb 18 02:00:43 crc kubenswrapper[4791]: I0218 02:00:43.707914 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4909a1368c998fec884a6ec8b651827693734f3e322746e8c786f4e129f1936"} err="failed to get container status \"e4909a1368c998fec884a6ec8b651827693734f3e322746e8c786f4e129f1936\": rpc error: code = NotFound desc = could not find container \"e4909a1368c998fec884a6ec8b651827693734f3e322746e8c786f4e129f1936\": container with ID starting with e4909a1368c998fec884a6ec8b651827693734f3e322746e8c786f4e129f1936 not found: ID does not exist" Feb 18 02:00:43 crc kubenswrapper[4791]: I0218 02:00:43.707929 4791 scope.go:117] "RemoveContainer" containerID="8a3fa71824b7c6c0900c955fe7cb29ca5516e3f96b4634f3a2f2067dd6688a1d" Feb 18 02:00:43 crc kubenswrapper[4791]: E0218 02:00:43.708289 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8a3fa71824b7c6c0900c955fe7cb29ca5516e3f96b4634f3a2f2067dd6688a1d\": container with ID starting with 8a3fa71824b7c6c0900c955fe7cb29ca5516e3f96b4634f3a2f2067dd6688a1d not found: ID does not exist" containerID="8a3fa71824b7c6c0900c955fe7cb29ca5516e3f96b4634f3a2f2067dd6688a1d" Feb 18 02:00:43 crc kubenswrapper[4791]: I0218 02:00:43.708403 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8a3fa71824b7c6c0900c955fe7cb29ca5516e3f96b4634f3a2f2067dd6688a1d"} err="failed to get container status \"8a3fa71824b7c6c0900c955fe7cb29ca5516e3f96b4634f3a2f2067dd6688a1d\": rpc error: code = NotFound desc = could not find container \"8a3fa71824b7c6c0900c955fe7cb29ca5516e3f96b4634f3a2f2067dd6688a1d\": container with ID starting with 8a3fa71824b7c6c0900c955fe7cb29ca5516e3f96b4634f3a2f2067dd6688a1d not found: ID does not exist" Feb 18 02:00:44 crc kubenswrapper[4791]: I0218 02:00:44.656553 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-nc6h7" Feb 18 02:00:44 crc kubenswrapper[4791]: I0218 02:00:44.656940 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-nc6h7" Feb 18 02:00:45 crc kubenswrapper[4791]: I0218 02:00:45.077035 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d32c7a7c-f951-47ef-b4bd-ede6f428f5ef" 
path="/var/lib/kubelet/pods/d32c7a7c-f951-47ef-b4bd-ede6f428f5ef/volumes" Feb 18 02:00:45 crc kubenswrapper[4791]: I0218 02:00:45.724527 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-nc6h7" podUID="06e9861f-aebc-4aa5-bf77-8126aa73924e" containerName="registry-server" probeResult="failure" output=< Feb 18 02:00:45 crc kubenswrapper[4791]: timeout: failed to connect service ":50051" within 1s Feb 18 02:00:45 crc kubenswrapper[4791]: > Feb 18 02:00:46 crc kubenswrapper[4791]: I0218 02:00:46.626397 4791 generic.go:334] "Generic (PLEG): container finished" podID="bec10b7f-b30a-4c42-828c-2d3d86635d33" containerID="b7e89fb4563120f85c657d1532b2f359002a0217a76fa7ba8d0f4af269a538cc" exitCode=2 Feb 18 02:00:46 crc kubenswrapper[4791]: I0218 02:00:46.626642 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rd8cm" event={"ID":"bec10b7f-b30a-4c42-828c-2d3d86635d33","Type":"ContainerDied","Data":"b7e89fb4563120f85c657d1532b2f359002a0217a76fa7ba8d0f4af269a538cc"} Feb 18 02:00:47 crc kubenswrapper[4791]: E0218 02:00:47.064360 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:00:48 crc kubenswrapper[4791]: I0218 02:00:48.233055 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rd8cm" Feb 18 02:00:48 crc kubenswrapper[4791]: I0218 02:00:48.361764 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bec10b7f-b30a-4c42-828c-2d3d86635d33-ssh-key-openstack-edpm-ipam\") pod \"bec10b7f-b30a-4c42-828c-2d3d86635d33\" (UID: \"bec10b7f-b30a-4c42-828c-2d3d86635d33\") " Feb 18 02:00:48 crc kubenswrapper[4791]: I0218 02:00:48.362886 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcrjb\" (UniqueName: \"kubernetes.io/projected/bec10b7f-b30a-4c42-828c-2d3d86635d33-kube-api-access-fcrjb\") pod \"bec10b7f-b30a-4c42-828c-2d3d86635d33\" (UID: \"bec10b7f-b30a-4c42-828c-2d3d86635d33\") " Feb 18 02:00:48 crc kubenswrapper[4791]: I0218 02:00:48.362967 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bec10b7f-b30a-4c42-828c-2d3d86635d33-inventory\") pod \"bec10b7f-b30a-4c42-828c-2d3d86635d33\" (UID: \"bec10b7f-b30a-4c42-828c-2d3d86635d33\") " Feb 18 02:00:48 crc kubenswrapper[4791]: I0218 02:00:48.369947 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bec10b7f-b30a-4c42-828c-2d3d86635d33-kube-api-access-fcrjb" (OuterVolumeSpecName: "kube-api-access-fcrjb") pod "bec10b7f-b30a-4c42-828c-2d3d86635d33" (UID: "bec10b7f-b30a-4c42-828c-2d3d86635d33"). InnerVolumeSpecName "kube-api-access-fcrjb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 02:00:48 crc kubenswrapper[4791]: I0218 02:00:48.401648 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bec10b7f-b30a-4c42-828c-2d3d86635d33-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "bec10b7f-b30a-4c42-828c-2d3d86635d33" (UID: "bec10b7f-b30a-4c42-828c-2d3d86635d33"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 02:00:48 crc kubenswrapper[4791]: I0218 02:00:48.403188 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bec10b7f-b30a-4c42-828c-2d3d86635d33-inventory" (OuterVolumeSpecName: "inventory") pod "bec10b7f-b30a-4c42-828c-2d3d86635d33" (UID: "bec10b7f-b30a-4c42-828c-2d3d86635d33"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 02:00:48 crc kubenswrapper[4791]: I0218 02:00:48.466528 4791 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bec10b7f-b30a-4c42-828c-2d3d86635d33-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Feb 18 02:00:48 crc kubenswrapper[4791]: I0218 02:00:48.466575 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcrjb\" (UniqueName: \"kubernetes.io/projected/bec10b7f-b30a-4c42-828c-2d3d86635d33-kube-api-access-fcrjb\") on node \"crc\" DevicePath \"\"" Feb 18 02:00:48 crc kubenswrapper[4791]: I0218 02:00:48.466585 4791 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bec10b7f-b30a-4c42-828c-2d3d86635d33-inventory\") on node \"crc\" DevicePath \"\"" Feb 18 02:00:48 crc kubenswrapper[4791]: I0218 02:00:48.650838 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rd8cm" event={"ID":"bec10b7f-b30a-4c42-828c-2d3d86635d33","Type":"ContainerDied","Data":"04dc770dd30b39606345089c9dca65bd1ae7d4aa864233703757ae3e1a3c19a5"} Feb 18 02:00:48 crc kubenswrapper[4791]: I0218 02:00:48.650887 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="04dc770dd30b39606345089c9dca65bd1ae7d4aa864233703757ae3e1a3c19a5" Feb 18 02:00:48 crc kubenswrapper[4791]: I0218 02:00:48.650944 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rd8cm" Feb 18 02:00:53 crc kubenswrapper[4791]: E0218 02:00:53.064707 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:00:55 crc kubenswrapper[4791]: I0218 02:00:55.703151 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-nc6h7" podUID="06e9861f-aebc-4aa5-bf77-8126aa73924e" containerName="registry-server" probeResult="failure" output=< Feb 18 02:00:55 crc kubenswrapper[4791]: timeout: failed to connect service ":50051" within 1s Feb 18 02:00:55 crc kubenswrapper[4791]: > Feb 18 02:00:58 crc kubenswrapper[4791]: E0218 02:00:58.063686 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:01:00 crc kubenswrapper[4791]: I0218 02:01:00.176042 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29523001-hb5h6"] Feb 18 02:01:00 crc kubenswrapper[4791]: E0218 02:01:00.177269 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bec10b7f-b30a-4c42-828c-2d3d86635d33" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Feb 18 02:01:00 crc kubenswrapper[4791]: I0218 02:01:00.177290 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="bec10b7f-b30a-4c42-828c-2d3d86635d33" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Feb 18 02:01:00 crc kubenswrapper[4791]: E0218 02:01:00.177328 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d32c7a7c-f951-47ef-b4bd-ede6f428f5ef" containerName="extract-utilities" Feb 18 02:01:00 crc kubenswrapper[4791]: I0218 02:01:00.177337 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="d32c7a7c-f951-47ef-b4bd-ede6f428f5ef" containerName="extract-utilities" Feb 18 02:01:00 crc kubenswrapper[4791]: E0218 02:01:00.177364 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d32c7a7c-f951-47ef-b4bd-ede6f428f5ef" containerName="registry-server" Feb 18 02:01:00 crc kubenswrapper[4791]: I0218 02:01:00.177372 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="d32c7a7c-f951-47ef-b4bd-ede6f428f5ef" containerName="registry-server" Feb 18 02:01:00 crc kubenswrapper[4791]: E0218 02:01:00.177392 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d32c7a7c-f951-47ef-b4bd-ede6f428f5ef" containerName="extract-content" Feb 18 02:01:00 crc kubenswrapper[4791]: I0218 02:01:00.177400 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="d32c7a7c-f951-47ef-b4bd-ede6f428f5ef" containerName="extract-content" Feb 18 02:01:00 crc kubenswrapper[4791]: I0218 02:01:00.177797 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="d32c7a7c-f951-47ef-b4bd-ede6f428f5ef" containerName="registry-server" Feb 18 02:01:00 crc kubenswrapper[4791]: I0218 02:01:00.177827 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="bec10b7f-b30a-4c42-828c-2d3d86635d33" 
containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Feb 18 02:01:00 crc kubenswrapper[4791]: I0218 02:01:00.181848 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29523001-hb5h6" Feb 18 02:01:00 crc kubenswrapper[4791]: I0218 02:01:00.194709 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29523001-hb5h6"] Feb 18 02:01:00 crc kubenswrapper[4791]: I0218 02:01:00.266188 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bebf6739-bc0b-4a35-bcc8-4197efe64e1d-combined-ca-bundle\") pod \"keystone-cron-29523001-hb5h6\" (UID: \"bebf6739-bc0b-4a35-bcc8-4197efe64e1d\") " pod="openstack/keystone-cron-29523001-hb5h6" Feb 18 02:01:00 crc kubenswrapper[4791]: I0218 02:01:00.266263 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bnhtm\" (UniqueName: \"kubernetes.io/projected/bebf6739-bc0b-4a35-bcc8-4197efe64e1d-kube-api-access-bnhtm\") pod \"keystone-cron-29523001-hb5h6\" (UID: \"bebf6739-bc0b-4a35-bcc8-4197efe64e1d\") " pod="openstack/keystone-cron-29523001-hb5h6" Feb 18 02:01:00 crc kubenswrapper[4791]: I0218 02:01:00.266388 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bebf6739-bc0b-4a35-bcc8-4197efe64e1d-config-data\") pod \"keystone-cron-29523001-hb5h6\" (UID: \"bebf6739-bc0b-4a35-bcc8-4197efe64e1d\") " pod="openstack/keystone-cron-29523001-hb5h6" Feb 18 02:01:00 crc kubenswrapper[4791]: I0218 02:01:00.266456 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/bebf6739-bc0b-4a35-bcc8-4197efe64e1d-fernet-keys\") pod \"keystone-cron-29523001-hb5h6\" (UID: \"bebf6739-bc0b-4a35-bcc8-4197efe64e1d\") " pod="openstack/keystone-cron-29523001-hb5h6" Feb 18 02:01:00 crc kubenswrapper[4791]: I0218 02:01:00.369201 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bebf6739-bc0b-4a35-bcc8-4197efe64e1d-combined-ca-bundle\") pod \"keystone-cron-29523001-hb5h6\" (UID: \"bebf6739-bc0b-4a35-bcc8-4197efe64e1d\") " pod="openstack/keystone-cron-29523001-hb5h6" Feb 18 02:01:00 crc kubenswrapper[4791]: I0218 02:01:00.369288 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bnhtm\" (UniqueName: \"kubernetes.io/projected/bebf6739-bc0b-4a35-bcc8-4197efe64e1d-kube-api-access-bnhtm\") pod \"keystone-cron-29523001-hb5h6\" (UID: \"bebf6739-bc0b-4a35-bcc8-4197efe64e1d\") " pod="openstack/keystone-cron-29523001-hb5h6" Feb 18 02:01:00 crc kubenswrapper[4791]: I0218 02:01:00.369380 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bebf6739-bc0b-4a35-bcc8-4197efe64e1d-config-data\") pod \"keystone-cron-29523001-hb5h6\" (UID: \"bebf6739-bc0b-4a35-bcc8-4197efe64e1d\") " pod="openstack/keystone-cron-29523001-hb5h6" Feb 18 02:01:00 crc kubenswrapper[4791]: I0218 02:01:00.369468 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/bebf6739-bc0b-4a35-bcc8-4197efe64e1d-fernet-keys\") pod \"keystone-cron-29523001-hb5h6\" (UID: 
\"bebf6739-bc0b-4a35-bcc8-4197efe64e1d\") " pod="openstack/keystone-cron-29523001-hb5h6" Feb 18 02:01:00 crc kubenswrapper[4791]: I0218 02:01:00.376239 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bebf6739-bc0b-4a35-bcc8-4197efe64e1d-config-data\") pod \"keystone-cron-29523001-hb5h6\" (UID: \"bebf6739-bc0b-4a35-bcc8-4197efe64e1d\") " pod="openstack/keystone-cron-29523001-hb5h6" Feb 18 02:01:00 crc kubenswrapper[4791]: I0218 02:01:00.376304 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/bebf6739-bc0b-4a35-bcc8-4197efe64e1d-fernet-keys\") pod \"keystone-cron-29523001-hb5h6\" (UID: \"bebf6739-bc0b-4a35-bcc8-4197efe64e1d\") " pod="openstack/keystone-cron-29523001-hb5h6" Feb 18 02:01:00 crc kubenswrapper[4791]: I0218 02:01:00.387847 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bebf6739-bc0b-4a35-bcc8-4197efe64e1d-combined-ca-bundle\") pod \"keystone-cron-29523001-hb5h6\" (UID: \"bebf6739-bc0b-4a35-bcc8-4197efe64e1d\") " pod="openstack/keystone-cron-29523001-hb5h6" Feb 18 02:01:00 crc kubenswrapper[4791]: I0218 02:01:00.389213 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bnhtm\" (UniqueName: \"kubernetes.io/projected/bebf6739-bc0b-4a35-bcc8-4197efe64e1d-kube-api-access-bnhtm\") pod \"keystone-cron-29523001-hb5h6\" (UID: \"bebf6739-bc0b-4a35-bcc8-4197efe64e1d\") " pod="openstack/keystone-cron-29523001-hb5h6" Feb 18 02:01:00 crc kubenswrapper[4791]: I0218 02:01:00.522715 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29523001-hb5h6" Feb 18 02:01:01 crc kubenswrapper[4791]: I0218 02:01:01.001503 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29523001-hb5h6"] Feb 18 02:01:01 crc kubenswrapper[4791]: I0218 02:01:01.789838 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29523001-hb5h6" event={"ID":"bebf6739-bc0b-4a35-bcc8-4197efe64e1d","Type":"ContainerStarted","Data":"e7f9c8a4db6a09c81886570d42af79606fe113c38e38d38738c2d8af7ab9eca5"} Feb 18 02:01:01 crc kubenswrapper[4791]: I0218 02:01:01.790231 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29523001-hb5h6" event={"ID":"bebf6739-bc0b-4a35-bcc8-4197efe64e1d","Type":"ContainerStarted","Data":"03b234c716041622a53d53ffc737692a948e6105071b5cd23f562130a8d23663"} Feb 18 02:01:01 crc kubenswrapper[4791]: I0218 02:01:01.812179 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29523001-hb5h6" podStartSLOduration=1.812140201 podStartE2EDuration="1.812140201s" podCreationTimestamp="2026-02-18 02:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-18 02:01:01.806331543 +0000 UTC m=+5203.374344713" watchObservedRunningTime="2026-02-18 02:01:01.812140201 +0000 UTC m=+5203.380153381" Feb 18 02:01:04 crc kubenswrapper[4791]: I0218 02:01:04.831950 4791 generic.go:334] "Generic (PLEG): container finished" podID="bebf6739-bc0b-4a35-bcc8-4197efe64e1d" containerID="e7f9c8a4db6a09c81886570d42af79606fe113c38e38d38738c2d8af7ab9eca5" exitCode=0 Feb 18 02:01:04 crc kubenswrapper[4791]: I0218 02:01:04.832006 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/keystone-cron-29523001-hb5h6" event={"ID":"bebf6739-bc0b-4a35-bcc8-4197efe64e1d","Type":"ContainerDied","Data":"e7f9c8a4db6a09c81886570d42af79606fe113c38e38d38738c2d8af7ab9eca5"} Feb 18 02:01:05 crc kubenswrapper[4791]: I0218 02:01:05.712751 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-nc6h7" podUID="06e9861f-aebc-4aa5-bf77-8126aa73924e" containerName="registry-server" probeResult="failure" output=< Feb 18 02:01:05 crc kubenswrapper[4791]: timeout: failed to connect service ":50051" within 1s Feb 18 02:01:05 crc kubenswrapper[4791]: > Feb 18 02:01:06 crc kubenswrapper[4791]: E0218 02:01:06.065140 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:01:06 crc kubenswrapper[4791]: I0218 02:01:06.339706 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29523001-hb5h6" Feb 18 02:01:06 crc kubenswrapper[4791]: I0218 02:01:06.422704 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bebf6739-bc0b-4a35-bcc8-4197efe64e1d-combined-ca-bundle\") pod \"bebf6739-bc0b-4a35-bcc8-4197efe64e1d\" (UID: \"bebf6739-bc0b-4a35-bcc8-4197efe64e1d\") " Feb 18 02:01:06 crc kubenswrapper[4791]: I0218 02:01:06.422796 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bebf6739-bc0b-4a35-bcc8-4197efe64e1d-config-data\") pod \"bebf6739-bc0b-4a35-bcc8-4197efe64e1d\" (UID: \"bebf6739-bc0b-4a35-bcc8-4197efe64e1d\") " Feb 18 02:01:06 crc kubenswrapper[4791]: I0218 02:01:06.422829 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bnhtm\" (UniqueName: \"kubernetes.io/projected/bebf6739-bc0b-4a35-bcc8-4197efe64e1d-kube-api-access-bnhtm\") pod \"bebf6739-bc0b-4a35-bcc8-4197efe64e1d\" (UID: \"bebf6739-bc0b-4a35-bcc8-4197efe64e1d\") " Feb 18 02:01:06 crc kubenswrapper[4791]: I0218 02:01:06.422898 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/bebf6739-bc0b-4a35-bcc8-4197efe64e1d-fernet-keys\") pod \"bebf6739-bc0b-4a35-bcc8-4197efe64e1d\" (UID: \"bebf6739-bc0b-4a35-bcc8-4197efe64e1d\") " Feb 18 02:01:06 crc kubenswrapper[4791]: I0218 02:01:06.430509 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bebf6739-bc0b-4a35-bcc8-4197efe64e1d-kube-api-access-bnhtm" (OuterVolumeSpecName: "kube-api-access-bnhtm") pod "bebf6739-bc0b-4a35-bcc8-4197efe64e1d" (UID: "bebf6739-bc0b-4a35-bcc8-4197efe64e1d"). InnerVolumeSpecName "kube-api-access-bnhtm". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 02:01:06 crc kubenswrapper[4791]: I0218 02:01:06.434087 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bebf6739-bc0b-4a35-bcc8-4197efe64e1d-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "bebf6739-bc0b-4a35-bcc8-4197efe64e1d" (UID: "bebf6739-bc0b-4a35-bcc8-4197efe64e1d"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 02:01:06 crc kubenswrapper[4791]: I0218 02:01:06.466346 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bebf6739-bc0b-4a35-bcc8-4197efe64e1d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bebf6739-bc0b-4a35-bcc8-4197efe64e1d" (UID: "bebf6739-bc0b-4a35-bcc8-4197efe64e1d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 02:01:06 crc kubenswrapper[4791]: I0218 02:01:06.527286 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bnhtm\" (UniqueName: \"kubernetes.io/projected/bebf6739-bc0b-4a35-bcc8-4197efe64e1d-kube-api-access-bnhtm\") on node \"crc\" DevicePath \"\"" Feb 18 02:01:06 crc kubenswrapper[4791]: I0218 02:01:06.527336 4791 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/bebf6739-bc0b-4a35-bcc8-4197efe64e1d-fernet-keys\") on node \"crc\" DevicePath \"\"" Feb 18 02:01:06 crc kubenswrapper[4791]: I0218 02:01:06.527349 4791 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bebf6739-bc0b-4a35-bcc8-4197efe64e1d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 18 02:01:06 crc kubenswrapper[4791]: I0218 02:01:06.533921 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bebf6739-bc0b-4a35-bcc8-4197efe64e1d-config-data" (OuterVolumeSpecName: "config-data") pod "bebf6739-bc0b-4a35-bcc8-4197efe64e1d" (UID: "bebf6739-bc0b-4a35-bcc8-4197efe64e1d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 18 02:01:06 crc kubenswrapper[4791]: I0218 02:01:06.630330 4791 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bebf6739-bc0b-4a35-bcc8-4197efe64e1d-config-data\") on node \"crc\" DevicePath \"\"" Feb 18 02:01:06 crc kubenswrapper[4791]: I0218 02:01:06.851812 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29523001-hb5h6" event={"ID":"bebf6739-bc0b-4a35-bcc8-4197efe64e1d","Type":"ContainerDied","Data":"03b234c716041622a53d53ffc737692a948e6105071b5cd23f562130a8d23663"} Feb 18 02:01:06 crc kubenswrapper[4791]: I0218 02:01:06.852127 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="03b234c716041622a53d53ffc737692a948e6105071b5cd23f562130a8d23663" Feb 18 02:01:06 crc kubenswrapper[4791]: I0218 02:01:06.851919 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29523001-hb5h6" Feb 18 02:01:12 crc kubenswrapper[4791]: E0218 02:01:12.063433 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:01:14 crc kubenswrapper[4791]: I0218 02:01:14.745820 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-nc6h7" Feb 18 02:01:14 crc kubenswrapper[4791]: I0218 02:01:14.847130 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-nc6h7" Feb 18 02:01:15 crc kubenswrapper[4791]: I0218 02:01:15.005365 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nc6h7"] Feb 18 02:01:15 crc kubenswrapper[4791]: I0218 02:01:15.947056 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-nc6h7" podUID="06e9861f-aebc-4aa5-bf77-8126aa73924e" containerName="registry-server" containerID="cri-o://cc2507a0af176a191c5b12eaea4e789a2ce954aa86d849b92a56dbf3ce83d6a0" gracePeriod=2 Feb 18 02:01:16 crc kubenswrapper[4791]: I0218 02:01:16.960360 4791 generic.go:334] "Generic (PLEG): container finished" podID="06e9861f-aebc-4aa5-bf77-8126aa73924e" containerID="cc2507a0af176a191c5b12eaea4e789a2ce954aa86d849b92a56dbf3ce83d6a0" exitCode=0 Feb 18 02:01:16 crc kubenswrapper[4791]: I0218 02:01:16.960433 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nc6h7" event={"ID":"06e9861f-aebc-4aa5-bf77-8126aa73924e","Type":"ContainerDied","Data":"cc2507a0af176a191c5b12eaea4e789a2ce954aa86d849b92a56dbf3ce83d6a0"} Feb 18 02:01:17 crc kubenswrapper[4791]: I0218 02:01:17.097934 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-nc6h7" Feb 18 02:01:17 crc kubenswrapper[4791]: I0218 02:01:17.220555 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06e9861f-aebc-4aa5-bf77-8126aa73924e-utilities\") pod \"06e9861f-aebc-4aa5-bf77-8126aa73924e\" (UID: \"06e9861f-aebc-4aa5-bf77-8126aa73924e\") " Feb 18 02:01:17 crc kubenswrapper[4791]: I0218 02:01:17.220668 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06e9861f-aebc-4aa5-bf77-8126aa73924e-catalog-content\") pod \"06e9861f-aebc-4aa5-bf77-8126aa73924e\" (UID: \"06e9861f-aebc-4aa5-bf77-8126aa73924e\") " Feb 18 02:01:17 crc kubenswrapper[4791]: I0218 02:01:17.220887 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xw64m\" (UniqueName: \"kubernetes.io/projected/06e9861f-aebc-4aa5-bf77-8126aa73924e-kube-api-access-xw64m\") pod \"06e9861f-aebc-4aa5-bf77-8126aa73924e\" (UID: \"06e9861f-aebc-4aa5-bf77-8126aa73924e\") " Feb 18 02:01:17 crc kubenswrapper[4791]: I0218 02:01:17.221996 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/06e9861f-aebc-4aa5-bf77-8126aa73924e-utilities" (OuterVolumeSpecName: "utilities") pod "06e9861f-aebc-4aa5-bf77-8126aa73924e" (UID: "06e9861f-aebc-4aa5-bf77-8126aa73924e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 02:01:17 crc kubenswrapper[4791]: I0218 02:01:17.223260 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06e9861f-aebc-4aa5-bf77-8126aa73924e-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 02:01:17 crc kubenswrapper[4791]: I0218 02:01:17.228245 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06e9861f-aebc-4aa5-bf77-8126aa73924e-kube-api-access-xw64m" (OuterVolumeSpecName: "kube-api-access-xw64m") pod "06e9861f-aebc-4aa5-bf77-8126aa73924e" (UID: "06e9861f-aebc-4aa5-bf77-8126aa73924e"). InnerVolumeSpecName "kube-api-access-xw64m". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 02:01:17 crc kubenswrapper[4791]: I0218 02:01:17.326461 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xw64m\" (UniqueName: \"kubernetes.io/projected/06e9861f-aebc-4aa5-bf77-8126aa73924e-kube-api-access-xw64m\") on node \"crc\" DevicePath \"\"" Feb 18 02:01:17 crc kubenswrapper[4791]: I0218 02:01:17.362797 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/06e9861f-aebc-4aa5-bf77-8126aa73924e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "06e9861f-aebc-4aa5-bf77-8126aa73924e" (UID: "06e9861f-aebc-4aa5-bf77-8126aa73924e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 02:01:17 crc kubenswrapper[4791]: I0218 02:01:17.428945 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06e9861f-aebc-4aa5-bf77-8126aa73924e-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 02:01:17 crc kubenswrapper[4791]: I0218 02:01:17.973350 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nc6h7" event={"ID":"06e9861f-aebc-4aa5-bf77-8126aa73924e","Type":"ContainerDied","Data":"6dac6a38d2e37481f6b7fe37aa42c6e9ec2bb8919a415600bc7b267065d25dee"} Feb 18 02:01:17 crc kubenswrapper[4791]: I0218 02:01:17.973431 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nc6h7" Feb 18 02:01:17 crc kubenswrapper[4791]: I0218 02:01:17.974649 4791 scope.go:117] "RemoveContainer" containerID="cc2507a0af176a191c5b12eaea4e789a2ce954aa86d849b92a56dbf3ce83d6a0" Feb 18 02:01:18 crc kubenswrapper[4791]: I0218 02:01:18.001449 4791 scope.go:117] "RemoveContainer" containerID="2ea384b028e95f80cb243082add4130e1c01b0ca035730a60f685495b21adae6" Feb 18 02:01:18 crc kubenswrapper[4791]: I0218 02:01:18.019505 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nc6h7"] Feb 18 02:01:18 crc kubenswrapper[4791]: I0218 02:01:18.033298 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-nc6h7"] Feb 18 02:01:18 crc kubenswrapper[4791]: I0218 02:01:18.037592 4791 scope.go:117] "RemoveContainer" containerID="e9812a5f321ab9ddd31e4a83b525cb0947c649178e25d89a3429c01a6b410bc0" Feb 18 02:01:18 crc kubenswrapper[4791]: E0218 02:01:18.069733 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:01:19 crc kubenswrapper[4791]: I0218 02:01:19.073842 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="06e9861f-aebc-4aa5-bf77-8126aa73924e" path="/var/lib/kubelet/pods/06e9861f-aebc-4aa5-bf77-8126aa73924e/volumes" Feb 18 02:01:24 crc kubenswrapper[4791]: E0218 02:01:24.063987 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:01:26 crc kubenswrapper[4791]: I0218 02:01:26.800233 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 02:01:26 crc kubenswrapper[4791]: I0218 02:01:26.800771 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 02:01:31 crc 
kubenswrapper[4791]: E0218 02:01:31.064109 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:01:39 crc kubenswrapper[4791]: E0218 02:01:39.070527 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:01:44 crc kubenswrapper[4791]: E0218 02:01:44.063673 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:01:51 crc kubenswrapper[4791]: I0218 02:01:51.006648 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-w6hkj/must-gather-q9txg"] Feb 18 02:01:51 crc kubenswrapper[4791]: E0218 02:01:51.007798 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bebf6739-bc0b-4a35-bcc8-4197efe64e1d" containerName="keystone-cron" Feb 18 02:01:51 crc kubenswrapper[4791]: I0218 02:01:51.007816 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="bebf6739-bc0b-4a35-bcc8-4197efe64e1d" containerName="keystone-cron" Feb 18 02:01:51 crc kubenswrapper[4791]: E0218 02:01:51.007865 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06e9861f-aebc-4aa5-bf77-8126aa73924e" containerName="extract-content" Feb 18 02:01:51 crc kubenswrapper[4791]: I0218 02:01:51.007873 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="06e9861f-aebc-4aa5-bf77-8126aa73924e" containerName="extract-content" Feb 18 02:01:51 crc kubenswrapper[4791]: E0218 02:01:51.007895 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06e9861f-aebc-4aa5-bf77-8126aa73924e" containerName="registry-server" Feb 18 02:01:51 crc kubenswrapper[4791]: I0218 02:01:51.007915 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="06e9861f-aebc-4aa5-bf77-8126aa73924e" containerName="registry-server" Feb 18 02:01:51 crc kubenswrapper[4791]: E0218 02:01:51.007925 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06e9861f-aebc-4aa5-bf77-8126aa73924e" containerName="extract-utilities" Feb 18 02:01:51 crc kubenswrapper[4791]: I0218 02:01:51.007933 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="06e9861f-aebc-4aa5-bf77-8126aa73924e" containerName="extract-utilities" Feb 18 02:01:51 crc kubenswrapper[4791]: I0218 02:01:51.008237 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="bebf6739-bc0b-4a35-bcc8-4197efe64e1d" containerName="keystone-cron" Feb 18 02:01:51 crc kubenswrapper[4791]: I0218 02:01:51.008262 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="06e9861f-aebc-4aa5-bf77-8126aa73924e" containerName="registry-server" Feb 18 02:01:51 crc kubenswrapper[4791]: I0218 02:01:51.009807 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-w6hkj/must-gather-q9txg" Feb 18 02:01:51 crc kubenswrapper[4791]: I0218 02:01:51.018226 4791 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-w6hkj"/"default-dockercfg-7gsm5" Feb 18 02:01:51 crc kubenswrapper[4791]: I0218 02:01:51.018456 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-w6hkj"/"openshift-service-ca.crt" Feb 18 02:01:51 crc kubenswrapper[4791]: I0218 02:01:51.023460 4791 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-w6hkj"/"kube-root-ca.crt" Feb 18 02:01:51 crc kubenswrapper[4791]: I0218 02:01:51.025458 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-w6hkj/must-gather-q9txg"] Feb 18 02:01:51 crc kubenswrapper[4791]: E0218 02:01:51.063926 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:01:51 crc kubenswrapper[4791]: I0218 02:01:51.127793 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/4831bc39-bb36-411b-a692-bf5d10b12d0d-must-gather-output\") pod \"must-gather-q9txg\" (UID: \"4831bc39-bb36-411b-a692-bf5d10b12d0d\") " pod="openshift-must-gather-w6hkj/must-gather-q9txg" Feb 18 02:01:51 crc kubenswrapper[4791]: I0218 02:01:51.128428 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9bgjl\" (UniqueName: \"kubernetes.io/projected/4831bc39-bb36-411b-a692-bf5d10b12d0d-kube-api-access-9bgjl\") pod \"must-gather-q9txg\" (UID: \"4831bc39-bb36-411b-a692-bf5d10b12d0d\") " pod="openshift-must-gather-w6hkj/must-gather-q9txg" Feb 18 02:01:51 crc kubenswrapper[4791]: I0218 02:01:51.230399 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9bgjl\" (UniqueName: \"kubernetes.io/projected/4831bc39-bb36-411b-a692-bf5d10b12d0d-kube-api-access-9bgjl\") pod \"must-gather-q9txg\" (UID: \"4831bc39-bb36-411b-a692-bf5d10b12d0d\") " pod="openshift-must-gather-w6hkj/must-gather-q9txg" Feb 18 02:01:51 crc kubenswrapper[4791]: I0218 02:01:51.230513 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/4831bc39-bb36-411b-a692-bf5d10b12d0d-must-gather-output\") pod \"must-gather-q9txg\" (UID: \"4831bc39-bb36-411b-a692-bf5d10b12d0d\") " pod="openshift-must-gather-w6hkj/must-gather-q9txg" Feb 18 02:01:51 crc kubenswrapper[4791]: I0218 02:01:51.230912 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/4831bc39-bb36-411b-a692-bf5d10b12d0d-must-gather-output\") pod \"must-gather-q9txg\" (UID: \"4831bc39-bb36-411b-a692-bf5d10b12d0d\") " pod="openshift-must-gather-w6hkj/must-gather-q9txg" Feb 18 02:01:51 crc kubenswrapper[4791]: I0218 02:01:51.249256 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9bgjl\" (UniqueName: \"kubernetes.io/projected/4831bc39-bb36-411b-a692-bf5d10b12d0d-kube-api-access-9bgjl\") pod \"must-gather-q9txg\" (UID: \"4831bc39-bb36-411b-a692-bf5d10b12d0d\") " 
pod="openshift-must-gather-w6hkj/must-gather-q9txg" Feb 18 02:01:51 crc kubenswrapper[4791]: I0218 02:01:51.328069 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w6hkj/must-gather-q9txg" Feb 18 02:01:51 crc kubenswrapper[4791]: I0218 02:01:51.814680 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-w6hkj/must-gather-q9txg"] Feb 18 02:01:51 crc kubenswrapper[4791]: I0218 02:01:51.895737 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w6hkj/must-gather-q9txg" event={"ID":"4831bc39-bb36-411b-a692-bf5d10b12d0d","Type":"ContainerStarted","Data":"26ac3b6dcc073b57759b17189b30bd456ed50c07d2a69fdfe0dfbf12e1d1b237"} Feb 18 02:01:56 crc kubenswrapper[4791]: E0218 02:01:56.063494 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:01:56 crc kubenswrapper[4791]: I0218 02:01:56.800341 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 02:01:56 crc kubenswrapper[4791]: I0218 02:01:56.800592 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 02:02:01 crc kubenswrapper[4791]: I0218 02:02:01.011789 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w6hkj/must-gather-q9txg" event={"ID":"4831bc39-bb36-411b-a692-bf5d10b12d0d","Type":"ContainerStarted","Data":"7a18e37ae395a6fceb0d6d557e26b5279ee75a1edb13c8dd0976d4768eccf9cd"} Feb 18 02:02:01 crc kubenswrapper[4791]: I0218 02:02:01.012543 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w6hkj/must-gather-q9txg" event={"ID":"4831bc39-bb36-411b-a692-bf5d10b12d0d","Type":"ContainerStarted","Data":"e6e503861532259da680c851d30a05e25050ce715d544d54341cb0e054568ee3"} Feb 18 02:02:01 crc kubenswrapper[4791]: I0218 02:02:01.048604 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-w6hkj/must-gather-q9txg" podStartSLOduration=3.128538887 podStartE2EDuration="11.048585608s" podCreationTimestamp="2026-02-18 02:01:50 +0000 UTC" firstStartedPulling="2026-02-18 02:01:51.824838116 +0000 UTC m=+5253.392851286" lastFinishedPulling="2026-02-18 02:01:59.744884827 +0000 UTC m=+5261.312898007" observedRunningTime="2026-02-18 02:02:01.040754567 +0000 UTC m=+5262.608767747" watchObservedRunningTime="2026-02-18 02:02:01.048585608 +0000 UTC m=+5262.616598778" Feb 18 02:02:02 crc kubenswrapper[4791]: E0218 02:02:02.064207 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" 
podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:02:05 crc kubenswrapper[4791]: I0218 02:02:05.530933 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-w6hkj/crc-debug-848mt"] Feb 18 02:02:05 crc kubenswrapper[4791]: I0218 02:02:05.533387 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w6hkj/crc-debug-848mt" Feb 18 02:02:05 crc kubenswrapper[4791]: I0218 02:02:05.567549 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ddb7d663-08df-4862-ab1b-d8386c6cf742-host\") pod \"crc-debug-848mt\" (UID: \"ddb7d663-08df-4862-ab1b-d8386c6cf742\") " pod="openshift-must-gather-w6hkj/crc-debug-848mt" Feb 18 02:02:05 crc kubenswrapper[4791]: I0218 02:02:05.567688 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nrm56\" (UniqueName: \"kubernetes.io/projected/ddb7d663-08df-4862-ab1b-d8386c6cf742-kube-api-access-nrm56\") pod \"crc-debug-848mt\" (UID: \"ddb7d663-08df-4862-ab1b-d8386c6cf742\") " pod="openshift-must-gather-w6hkj/crc-debug-848mt" Feb 18 02:02:05 crc kubenswrapper[4791]: I0218 02:02:05.669641 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ddb7d663-08df-4862-ab1b-d8386c6cf742-host\") pod \"crc-debug-848mt\" (UID: \"ddb7d663-08df-4862-ab1b-d8386c6cf742\") " pod="openshift-must-gather-w6hkj/crc-debug-848mt" Feb 18 02:02:05 crc kubenswrapper[4791]: I0218 02:02:05.670068 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nrm56\" (UniqueName: \"kubernetes.io/projected/ddb7d663-08df-4862-ab1b-d8386c6cf742-kube-api-access-nrm56\") pod \"crc-debug-848mt\" (UID: \"ddb7d663-08df-4862-ab1b-d8386c6cf742\") " pod="openshift-must-gather-w6hkj/crc-debug-848mt" Feb 18 02:02:05 crc kubenswrapper[4791]: I0218 02:02:05.670306 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ddb7d663-08df-4862-ab1b-d8386c6cf742-host\") pod \"crc-debug-848mt\" (UID: \"ddb7d663-08df-4862-ab1b-d8386c6cf742\") " pod="openshift-must-gather-w6hkj/crc-debug-848mt" Feb 18 02:02:05 crc kubenswrapper[4791]: I0218 02:02:05.695369 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nrm56\" (UniqueName: \"kubernetes.io/projected/ddb7d663-08df-4862-ab1b-d8386c6cf742-kube-api-access-nrm56\") pod \"crc-debug-848mt\" (UID: \"ddb7d663-08df-4862-ab1b-d8386c6cf742\") " pod="openshift-must-gather-w6hkj/crc-debug-848mt" Feb 18 02:02:05 crc kubenswrapper[4791]: I0218 02:02:05.858583 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-w6hkj/crc-debug-848mt" Feb 18 02:02:06 crc kubenswrapper[4791]: I0218 02:02:06.067510 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w6hkj/crc-debug-848mt" event={"ID":"ddb7d663-08df-4862-ab1b-d8386c6cf742","Type":"ContainerStarted","Data":"468f87f957e610e476d7e52dd6260a2cbe5f4bbb1a3ffda93475e38418a15fb6"} Feb 18 02:02:09 crc kubenswrapper[4791]: E0218 02:02:09.072933 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:02:14 crc kubenswrapper[4791]: E0218 02:02:14.064462 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:02:18 crc kubenswrapper[4791]: I0218 02:02:18.214924 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w6hkj/crc-debug-848mt" event={"ID":"ddb7d663-08df-4862-ab1b-d8386c6cf742","Type":"ContainerStarted","Data":"ea2f1591e971c8e16289d17943f302688cbb64e04a3bdd995372da05a06ccadd"} Feb 18 02:02:18 crc kubenswrapper[4791]: I0218 02:02:18.237073 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-w6hkj/crc-debug-848mt" podStartSLOduration=1.532273005 podStartE2EDuration="13.237049439s" podCreationTimestamp="2026-02-18 02:02:05 +0000 UTC" firstStartedPulling="2026-02-18 02:02:05.89601011 +0000 UTC m=+5267.464023280" lastFinishedPulling="2026-02-18 02:02:17.600786544 +0000 UTC m=+5279.168799714" observedRunningTime="2026-02-18 02:02:18.228583069 +0000 UTC m=+5279.796596239" watchObservedRunningTime="2026-02-18 02:02:18.237049439 +0000 UTC m=+5279.805062599" Feb 18 02:02:22 crc kubenswrapper[4791]: E0218 02:02:22.063832 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:02:26 crc kubenswrapper[4791]: E0218 02:02:26.066242 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:02:26 crc kubenswrapper[4791]: I0218 02:02:26.799971 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 02:02:26 crc kubenswrapper[4791]: I0218 02:02:26.800535 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" 
containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 02:02:26 crc kubenswrapper[4791]: I0218 02:02:26.800600 4791 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" Feb 18 02:02:26 crc kubenswrapper[4791]: I0218 02:02:26.801460 4791 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c910b6cc87a9dcc9eddfba49c0ce2c47da01bea9873b2e6847f3f6b997395cc8"} pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 18 02:02:26 crc kubenswrapper[4791]: I0218 02:02:26.801545 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" containerID="cri-o://c910b6cc87a9dcc9eddfba49c0ce2c47da01bea9873b2e6847f3f6b997395cc8" gracePeriod=600 Feb 18 02:02:26 crc kubenswrapper[4791]: E0218 02:02:26.930957 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 02:02:27 crc kubenswrapper[4791]: I0218 02:02:27.307706 4791 generic.go:334] "Generic (PLEG): container finished" podID="0b31a333-8f95-459c-8135-e91e557c4c85" containerID="c910b6cc87a9dcc9eddfba49c0ce2c47da01bea9873b2e6847f3f6b997395cc8" exitCode=0 Feb 18 02:02:27 crc kubenswrapper[4791]: I0218 02:02:27.307759 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerDied","Data":"c910b6cc87a9dcc9eddfba49c0ce2c47da01bea9873b2e6847f3f6b997395cc8"} Feb 18 02:02:27 crc kubenswrapper[4791]: I0218 02:02:27.307801 4791 scope.go:117] "RemoveContainer" containerID="f344a350fe300a77dd4db872e88658f1996dd926c3d0ada34243c9da47ff930a" Feb 18 02:02:27 crc kubenswrapper[4791]: I0218 02:02:27.308510 4791 scope.go:117] "RemoveContainer" containerID="c910b6cc87a9dcc9eddfba49c0ce2c47da01bea9873b2e6847f3f6b997395cc8" Feb 18 02:02:27 crc kubenswrapper[4791]: E0218 02:02:27.309097 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 02:02:33 crc kubenswrapper[4791]: E0218 02:02:33.066762 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:02:34 
crc kubenswrapper[4791]: I0218 02:02:34.388185 4791 generic.go:334] "Generic (PLEG): container finished" podID="ddb7d663-08df-4862-ab1b-d8386c6cf742" containerID="ea2f1591e971c8e16289d17943f302688cbb64e04a3bdd995372da05a06ccadd" exitCode=0 Feb 18 02:02:34 crc kubenswrapper[4791]: I0218 02:02:34.388202 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w6hkj/crc-debug-848mt" event={"ID":"ddb7d663-08df-4862-ab1b-d8386c6cf742","Type":"ContainerDied","Data":"ea2f1591e971c8e16289d17943f302688cbb64e04a3bdd995372da05a06ccadd"} Feb 18 02:02:36 crc kubenswrapper[4791]: I0218 02:02:36.265097 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w6hkj/crc-debug-848mt" Feb 18 02:02:36 crc kubenswrapper[4791]: I0218 02:02:36.287028 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nrm56\" (UniqueName: \"kubernetes.io/projected/ddb7d663-08df-4862-ab1b-d8386c6cf742-kube-api-access-nrm56\") pod \"ddb7d663-08df-4862-ab1b-d8386c6cf742\" (UID: \"ddb7d663-08df-4862-ab1b-d8386c6cf742\") " Feb 18 02:02:36 crc kubenswrapper[4791]: I0218 02:02:36.287376 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ddb7d663-08df-4862-ab1b-d8386c6cf742-host\") pod \"ddb7d663-08df-4862-ab1b-d8386c6cf742\" (UID: \"ddb7d663-08df-4862-ab1b-d8386c6cf742\") " Feb 18 02:02:36 crc kubenswrapper[4791]: I0218 02:02:36.288509 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ddb7d663-08df-4862-ab1b-d8386c6cf742-host" (OuterVolumeSpecName: "host") pod "ddb7d663-08df-4862-ab1b-d8386c6cf742" (UID: "ddb7d663-08df-4862-ab1b-d8386c6cf742"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 02:02:36 crc kubenswrapper[4791]: I0218 02:02:36.289499 4791 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ddb7d663-08df-4862-ab1b-d8386c6cf742-host\") on node \"crc\" DevicePath \"\"" Feb 18 02:02:36 crc kubenswrapper[4791]: I0218 02:02:36.304386 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ddb7d663-08df-4862-ab1b-d8386c6cf742-kube-api-access-nrm56" (OuterVolumeSpecName: "kube-api-access-nrm56") pod "ddb7d663-08df-4862-ab1b-d8386c6cf742" (UID: "ddb7d663-08df-4862-ab1b-d8386c6cf742"). InnerVolumeSpecName "kube-api-access-nrm56". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 02:02:36 crc kubenswrapper[4791]: I0218 02:02:36.324586 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-w6hkj/crc-debug-848mt"] Feb 18 02:02:36 crc kubenswrapper[4791]: I0218 02:02:36.337781 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-w6hkj/crc-debug-848mt"] Feb 18 02:02:36 crc kubenswrapper[4791]: I0218 02:02:36.391689 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nrm56\" (UniqueName: \"kubernetes.io/projected/ddb7d663-08df-4862-ab1b-d8386c6cf742-kube-api-access-nrm56\") on node \"crc\" DevicePath \"\"" Feb 18 02:02:36 crc kubenswrapper[4791]: I0218 02:02:36.409229 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-w6hkj/crc-debug-848mt" Feb 18 02:02:36 crc kubenswrapper[4791]: I0218 02:02:36.409241 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="468f87f957e610e476d7e52dd6260a2cbe5f4bbb1a3ffda93475e38418a15fb6" Feb 18 02:02:37 crc kubenswrapper[4791]: I0218 02:02:37.073661 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ddb7d663-08df-4862-ab1b-d8386c6cf742" path="/var/lib/kubelet/pods/ddb7d663-08df-4862-ab1b-d8386c6cf742/volumes" Feb 18 02:02:37 crc kubenswrapper[4791]: I0218 02:02:37.601894 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-w6hkj/crc-debug-f59cd"] Feb 18 02:02:37 crc kubenswrapper[4791]: E0218 02:02:37.602493 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddb7d663-08df-4862-ab1b-d8386c6cf742" containerName="container-00" Feb 18 02:02:37 crc kubenswrapper[4791]: I0218 02:02:37.602512 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddb7d663-08df-4862-ab1b-d8386c6cf742" containerName="container-00" Feb 18 02:02:37 crc kubenswrapper[4791]: I0218 02:02:37.602821 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddb7d663-08df-4862-ab1b-d8386c6cf742" containerName="container-00" Feb 18 02:02:37 crc kubenswrapper[4791]: I0218 02:02:37.603643 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w6hkj/crc-debug-f59cd" Feb 18 02:02:37 crc kubenswrapper[4791]: I0218 02:02:37.620121 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/7f2f982e-e239-46f3-910b-ab93a0930b3f-host\") pod \"crc-debug-f59cd\" (UID: \"7f2f982e-e239-46f3-910b-ab93a0930b3f\") " pod="openshift-must-gather-w6hkj/crc-debug-f59cd" Feb 18 02:02:37 crc kubenswrapper[4791]: I0218 02:02:37.620555 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-djjsb\" (UniqueName: \"kubernetes.io/projected/7f2f982e-e239-46f3-910b-ab93a0930b3f-kube-api-access-djjsb\") pod \"crc-debug-f59cd\" (UID: \"7f2f982e-e239-46f3-910b-ab93a0930b3f\") " pod="openshift-must-gather-w6hkj/crc-debug-f59cd" Feb 18 02:02:37 crc kubenswrapper[4791]: I0218 02:02:37.723295 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/7f2f982e-e239-46f3-910b-ab93a0930b3f-host\") pod \"crc-debug-f59cd\" (UID: \"7f2f982e-e239-46f3-910b-ab93a0930b3f\") " pod="openshift-must-gather-w6hkj/crc-debug-f59cd" Feb 18 02:02:37 crc kubenswrapper[4791]: I0218 02:02:37.723373 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-djjsb\" (UniqueName: \"kubernetes.io/projected/7f2f982e-e239-46f3-910b-ab93a0930b3f-kube-api-access-djjsb\") pod \"crc-debug-f59cd\" (UID: \"7f2f982e-e239-46f3-910b-ab93a0930b3f\") " pod="openshift-must-gather-w6hkj/crc-debug-f59cd" Feb 18 02:02:37 crc kubenswrapper[4791]: I0218 02:02:37.723440 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/7f2f982e-e239-46f3-910b-ab93a0930b3f-host\") pod \"crc-debug-f59cd\" (UID: \"7f2f982e-e239-46f3-910b-ab93a0930b3f\") " pod="openshift-must-gather-w6hkj/crc-debug-f59cd" Feb 18 02:02:38 crc kubenswrapper[4791]: I0218 02:02:38.299811 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-djjsb\" 
(UniqueName: \"kubernetes.io/projected/7f2f982e-e239-46f3-910b-ab93a0930b3f-kube-api-access-djjsb\") pod \"crc-debug-f59cd\" (UID: \"7f2f982e-e239-46f3-910b-ab93a0930b3f\") " pod="openshift-must-gather-w6hkj/crc-debug-f59cd" Feb 18 02:02:38 crc kubenswrapper[4791]: I0218 02:02:38.525332 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w6hkj/crc-debug-f59cd" Feb 18 02:02:39 crc kubenswrapper[4791]: I0218 02:02:39.069015 4791 scope.go:117] "RemoveContainer" containerID="c910b6cc87a9dcc9eddfba49c0ce2c47da01bea9873b2e6847f3f6b997395cc8" Feb 18 02:02:39 crc kubenswrapper[4791]: E0218 02:02:39.069542 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 02:02:39 crc kubenswrapper[4791]: E0218 02:02:39.069777 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:02:39 crc kubenswrapper[4791]: I0218 02:02:39.472200 4791 generic.go:334] "Generic (PLEG): container finished" podID="7f2f982e-e239-46f3-910b-ab93a0930b3f" containerID="563db0f6595603d3048e303e2465a488181841d67204661d66904d30b49e1f54" exitCode=1 Feb 18 02:02:39 crc kubenswrapper[4791]: I0218 02:02:39.472246 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w6hkj/crc-debug-f59cd" event={"ID":"7f2f982e-e239-46f3-910b-ab93a0930b3f","Type":"ContainerDied","Data":"563db0f6595603d3048e303e2465a488181841d67204661d66904d30b49e1f54"} Feb 18 02:02:39 crc kubenswrapper[4791]: I0218 02:02:39.472276 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w6hkj/crc-debug-f59cd" event={"ID":"7f2f982e-e239-46f3-910b-ab93a0930b3f","Type":"ContainerStarted","Data":"3c0eb9f2bcbf19f396a045e323269bf920ed9483bb0542853e413fcc7a3915e1"} Feb 18 02:02:39 crc kubenswrapper[4791]: I0218 02:02:39.514011 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-w6hkj/crc-debug-f59cd"] Feb 18 02:02:39 crc kubenswrapper[4791]: I0218 02:02:39.526862 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-w6hkj/crc-debug-f59cd"] Feb 18 02:02:40 crc kubenswrapper[4791]: I0218 02:02:40.606219 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-w6hkj/crc-debug-f59cd" Feb 18 02:02:40 crc kubenswrapper[4791]: I0218 02:02:40.694384 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/7f2f982e-e239-46f3-910b-ab93a0930b3f-host\") pod \"7f2f982e-e239-46f3-910b-ab93a0930b3f\" (UID: \"7f2f982e-e239-46f3-910b-ab93a0930b3f\") " Feb 18 02:02:40 crc kubenswrapper[4791]: I0218 02:02:40.694496 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7f2f982e-e239-46f3-910b-ab93a0930b3f-host" (OuterVolumeSpecName: "host") pod "7f2f982e-e239-46f3-910b-ab93a0930b3f" (UID: "7f2f982e-e239-46f3-910b-ab93a0930b3f"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 18 02:02:40 crc kubenswrapper[4791]: I0218 02:02:40.694681 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-djjsb\" (UniqueName: \"kubernetes.io/projected/7f2f982e-e239-46f3-910b-ab93a0930b3f-kube-api-access-djjsb\") pod \"7f2f982e-e239-46f3-910b-ab93a0930b3f\" (UID: \"7f2f982e-e239-46f3-910b-ab93a0930b3f\") " Feb 18 02:02:40 crc kubenswrapper[4791]: I0218 02:02:40.696235 4791 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/7f2f982e-e239-46f3-910b-ab93a0930b3f-host\") on node \"crc\" DevicePath \"\"" Feb 18 02:02:40 crc kubenswrapper[4791]: I0218 02:02:40.705372 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f2f982e-e239-46f3-910b-ab93a0930b3f-kube-api-access-djjsb" (OuterVolumeSpecName: "kube-api-access-djjsb") pod "7f2f982e-e239-46f3-910b-ab93a0930b3f" (UID: "7f2f982e-e239-46f3-910b-ab93a0930b3f"). InnerVolumeSpecName "kube-api-access-djjsb". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 02:02:40 crc kubenswrapper[4791]: I0218 02:02:40.798419 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-djjsb\" (UniqueName: \"kubernetes.io/projected/7f2f982e-e239-46f3-910b-ab93a0930b3f-kube-api-access-djjsb\") on node \"crc\" DevicePath \"\"" Feb 18 02:02:41 crc kubenswrapper[4791]: I0218 02:02:41.072923 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7f2f982e-e239-46f3-910b-ab93a0930b3f" path="/var/lib/kubelet/pods/7f2f982e-e239-46f3-910b-ab93a0930b3f/volumes" Feb 18 02:02:41 crc kubenswrapper[4791]: I0218 02:02:41.501365 4791 scope.go:117] "RemoveContainer" containerID="563db0f6595603d3048e303e2465a488181841d67204661d66904d30b49e1f54" Feb 18 02:02:41 crc kubenswrapper[4791]: I0218 02:02:41.501850 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-w6hkj/crc-debug-f59cd" Feb 18 02:02:48 crc kubenswrapper[4791]: E0218 02:02:48.064079 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:02:50 crc kubenswrapper[4791]: I0218 02:02:50.060978 4791 scope.go:117] "RemoveContainer" containerID="c910b6cc87a9dcc9eddfba49c0ce2c47da01bea9873b2e6847f3f6b997395cc8" Feb 18 02:02:50 crc kubenswrapper[4791]: E0218 02:02:50.061920 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 02:02:52 crc kubenswrapper[4791]: I0218 02:02:52.063267 4791 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 18 02:02:52 crc kubenswrapper[4791]: E0218 02:02:52.148878 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 02:02:52 crc kubenswrapper[4791]: E0218 02:02:52.148941 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 02:02:52 crc kubenswrapper[4791]: E0218 02:02:52.149399 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lgn7d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-scphk_openstack(68e5e8d6-5771-4045-858a-4a39b2db99f9): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 18 02:02:52 crc kubenswrapper[4791]: E0218 02:02:52.150721 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:03:03 crc kubenswrapper[4791]: E0218 02:03:03.063070 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:03:05 crc kubenswrapper[4791]: I0218 02:03:05.062062 4791 scope.go:117] "RemoveContainer" containerID="c910b6cc87a9dcc9eddfba49c0ce2c47da01bea9873b2e6847f3f6b997395cc8" Feb 18 02:03:05 crc kubenswrapper[4791]: E0218 02:03:05.063334 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 02:03:07 crc kubenswrapper[4791]: E0218 02:03:07.063458 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:03:17 crc kubenswrapper[4791]: E0218 02:03:17.064653 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:03:18 crc kubenswrapper[4791]: I0218 02:03:18.062106 4791 scope.go:117] "RemoveContainer" containerID="c910b6cc87a9dcc9eddfba49c0ce2c47da01bea9873b2e6847f3f6b997395cc8" Feb 18 02:03:18 crc kubenswrapper[4791]: E0218 02:03:18.062810 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 02:03:18 crc kubenswrapper[4791]: E0218 02:03:18.068943 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:03:28 crc kubenswrapper[4791]: I0218 02:03:28.464779 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-52t8v"] Feb 18 02:03:28 crc kubenswrapper[4791]: E0218 02:03:28.465880 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f2f982e-e239-46f3-910b-ab93a0930b3f" containerName="container-00" Feb 18 02:03:28 crc kubenswrapper[4791]: I0218 02:03:28.465892 4791 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="7f2f982e-e239-46f3-910b-ab93a0930b3f" containerName="container-00" Feb 18 02:03:28 crc kubenswrapper[4791]: I0218 02:03:28.466149 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f2f982e-e239-46f3-910b-ab93a0930b3f" containerName="container-00" Feb 18 02:03:28 crc kubenswrapper[4791]: I0218 02:03:28.471230 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-52t8v" Feb 18 02:03:28 crc kubenswrapper[4791]: I0218 02:03:28.490098 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-52t8v"] Feb 18 02:03:28 crc kubenswrapper[4791]: I0218 02:03:28.625190 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-66ppf\" (UniqueName: \"kubernetes.io/projected/97f48b91-2087-41cd-902d-75da9fa5ea8c-kube-api-access-66ppf\") pod \"community-operators-52t8v\" (UID: \"97f48b91-2087-41cd-902d-75da9fa5ea8c\") " pod="openshift-marketplace/community-operators-52t8v" Feb 18 02:03:28 crc kubenswrapper[4791]: I0218 02:03:28.625240 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/97f48b91-2087-41cd-902d-75da9fa5ea8c-utilities\") pod \"community-operators-52t8v\" (UID: \"97f48b91-2087-41cd-902d-75da9fa5ea8c\") " pod="openshift-marketplace/community-operators-52t8v" Feb 18 02:03:28 crc kubenswrapper[4791]: I0218 02:03:28.625354 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/97f48b91-2087-41cd-902d-75da9fa5ea8c-catalog-content\") pod \"community-operators-52t8v\" (UID: \"97f48b91-2087-41cd-902d-75da9fa5ea8c\") " pod="openshift-marketplace/community-operators-52t8v" Feb 18 02:03:28 crc kubenswrapper[4791]: I0218 02:03:28.727544 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-66ppf\" (UniqueName: \"kubernetes.io/projected/97f48b91-2087-41cd-902d-75da9fa5ea8c-kube-api-access-66ppf\") pod \"community-operators-52t8v\" (UID: \"97f48b91-2087-41cd-902d-75da9fa5ea8c\") " pod="openshift-marketplace/community-operators-52t8v" Feb 18 02:03:28 crc kubenswrapper[4791]: I0218 02:03:28.727593 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/97f48b91-2087-41cd-902d-75da9fa5ea8c-utilities\") pod \"community-operators-52t8v\" (UID: \"97f48b91-2087-41cd-902d-75da9fa5ea8c\") " pod="openshift-marketplace/community-operators-52t8v" Feb 18 02:03:28 crc kubenswrapper[4791]: I0218 02:03:28.727656 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/97f48b91-2087-41cd-902d-75da9fa5ea8c-catalog-content\") pod \"community-operators-52t8v\" (UID: \"97f48b91-2087-41cd-902d-75da9fa5ea8c\") " pod="openshift-marketplace/community-operators-52t8v" Feb 18 02:03:28 crc kubenswrapper[4791]: I0218 02:03:28.728150 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/97f48b91-2087-41cd-902d-75da9fa5ea8c-utilities\") pod \"community-operators-52t8v\" (UID: \"97f48b91-2087-41cd-902d-75da9fa5ea8c\") " pod="openshift-marketplace/community-operators-52t8v" Feb 18 02:03:28 crc kubenswrapper[4791]: I0218 02:03:28.728246 4791 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/97f48b91-2087-41cd-902d-75da9fa5ea8c-catalog-content\") pod \"community-operators-52t8v\" (UID: \"97f48b91-2087-41cd-902d-75da9fa5ea8c\") " pod="openshift-marketplace/community-operators-52t8v" Feb 18 02:03:28 crc kubenswrapper[4791]: I0218 02:03:28.753247 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-66ppf\" (UniqueName: \"kubernetes.io/projected/97f48b91-2087-41cd-902d-75da9fa5ea8c-kube-api-access-66ppf\") pod \"community-operators-52t8v\" (UID: \"97f48b91-2087-41cd-902d-75da9fa5ea8c\") " pod="openshift-marketplace/community-operators-52t8v" Feb 18 02:03:28 crc kubenswrapper[4791]: I0218 02:03:28.794754 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-52t8v" Feb 18 02:03:29 crc kubenswrapper[4791]: I0218 02:03:29.546940 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-52t8v"] Feb 18 02:03:30 crc kubenswrapper[4791]: I0218 02:03:30.020180 4791 generic.go:334] "Generic (PLEG): container finished" podID="97f48b91-2087-41cd-902d-75da9fa5ea8c" containerID="c0fdeb6fbfcaeb281f0b4259e0c082b443aeece5503f3230b10bb9363184534d" exitCode=0 Feb 18 02:03:30 crc kubenswrapper[4791]: I0218 02:03:30.020291 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-52t8v" event={"ID":"97f48b91-2087-41cd-902d-75da9fa5ea8c","Type":"ContainerDied","Data":"c0fdeb6fbfcaeb281f0b4259e0c082b443aeece5503f3230b10bb9363184534d"} Feb 18 02:03:30 crc kubenswrapper[4791]: I0218 02:03:30.020493 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-52t8v" event={"ID":"97f48b91-2087-41cd-902d-75da9fa5ea8c","Type":"ContainerStarted","Data":"3041455112b6ebe7fd0bce0a2e13d6090b26c63a08ca00553996594f03bb6b5c"} Feb 18 02:03:31 crc kubenswrapper[4791]: E0218 02:03:31.062998 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:03:32 crc kubenswrapper[4791]: I0218 02:03:32.040863 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-52t8v" event={"ID":"97f48b91-2087-41cd-902d-75da9fa5ea8c","Type":"ContainerStarted","Data":"67f90259a12d576e6b2566e813ad79543be4484d4b163e6096401a9a2f2673cb"} Feb 18 02:03:32 crc kubenswrapper[4791]: E0218 02:03:32.184658 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 02:03:32 crc kubenswrapper[4791]: E0218 02:03:32.184829 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 02:03:32 crc kubenswrapper[4791]: E0218 02:03:32.185018 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndh699h566hcch79h59h595h58bhb6h7fh594h67fh546h668h56hc8h6chd5h94h599h587hddh64bh57bhc9h68fh7dh57bh65dh64fh68dh59dq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-np7gr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(4b9cec47-aeda-40f0-b83e-46f09ce65e95): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Feb 18 02:03:32 crc kubenswrapper[4791]: E0218 02:03:32.186403 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:03:33 crc kubenswrapper[4791]: I0218 02:03:33.057263 4791 generic.go:334] "Generic (PLEG): container finished" podID="97f48b91-2087-41cd-902d-75da9fa5ea8c" containerID="67f90259a12d576e6b2566e813ad79543be4484d4b163e6096401a9a2f2673cb" exitCode=0 Feb 18 02:03:33 crc kubenswrapper[4791]: I0218 02:03:33.057599 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-52t8v" event={"ID":"97f48b91-2087-41cd-902d-75da9fa5ea8c","Type":"ContainerDied","Data":"67f90259a12d576e6b2566e813ad79543be4484d4b163e6096401a9a2f2673cb"} Feb 18 02:03:33 crc kubenswrapper[4791]: I0218 02:03:33.060816 4791 scope.go:117] "RemoveContainer" containerID="c910b6cc87a9dcc9eddfba49c0ce2c47da01bea9873b2e6847f3f6b997395cc8" Feb 18 02:03:33 crc kubenswrapper[4791]: E0218 02:03:33.061058 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 02:03:34 crc kubenswrapper[4791]: I0218 02:03:34.071499 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-52t8v" event={"ID":"97f48b91-2087-41cd-902d-75da9fa5ea8c","Type":"ContainerStarted","Data":"b7eff96e24e9dc0ae8532eccb2d78fcd11c5e2fe83a0f36ff2304fd45a9597a9"} Feb 18 02:03:34 crc kubenswrapper[4791]: I0218 02:03:34.100523 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-52t8v" podStartSLOduration=2.634777007 podStartE2EDuration="6.100503775s" podCreationTimestamp="2026-02-18 02:03:28 +0000 UTC" firstStartedPulling="2026-02-18 02:03:30.022135579 +0000 UTC m=+5351.590148749" lastFinishedPulling="2026-02-18 02:03:33.487862317 +0000 UTC m=+5355.055875517" observedRunningTime="2026-02-18 02:03:34.096414019 +0000 UTC m=+5355.664427199" watchObservedRunningTime="2026-02-18 02:03:34.100503775 +0000 UTC m=+5355.668516965" Feb 18 02:03:38 crc kubenswrapper[4791]: I0218 02:03:38.795028 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-52t8v" Feb 18 02:03:38 crc kubenswrapper[4791]: I0218 02:03:38.796525 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-52t8v" Feb 18 02:03:38 crc kubenswrapper[4791]: I0218 02:03:38.843247 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-52t8v" Feb 18 02:03:39 crc kubenswrapper[4791]: I0218 02:03:39.284666 4791 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-marketplace/community-operators-52t8v" Feb 18 02:03:39 crc kubenswrapper[4791]: I0218 02:03:39.334361 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-52t8v"] Feb 18 02:03:40 crc kubenswrapper[4791]: I0218 02:03:40.407039 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_0ccf0634-a041-40a2-9213-614aac6f82c4/aodh-api/0.log" Feb 18 02:03:40 crc kubenswrapper[4791]: I0218 02:03:40.537305 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_0ccf0634-a041-40a2-9213-614aac6f82c4/aodh-evaluator/0.log" Feb 18 02:03:40 crc kubenswrapper[4791]: I0218 02:03:40.839387 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_0ccf0634-a041-40a2-9213-614aac6f82c4/aodh-notifier/0.log" Feb 18 02:03:40 crc kubenswrapper[4791]: I0218 02:03:40.875458 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_0ccf0634-a041-40a2-9213-614aac6f82c4/aodh-listener/0.log" Feb 18 02:03:41 crc kubenswrapper[4791]: I0218 02:03:41.025780 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-654c877dd4-hwg2j_90af684f-c845-4617-a1d0-106ffecccdfc/barbican-api/0.log" Feb 18 02:03:41 crc kubenswrapper[4791]: I0218 02:03:41.048233 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-654c877dd4-hwg2j_90af684f-c845-4617-a1d0-106ffecccdfc/barbican-api-log/0.log" Feb 18 02:03:41 crc kubenswrapper[4791]: I0218 02:03:41.076679 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-54d8d8c67d-vxfgq_2c364d42-b757-4c65-a010-5db856347830/barbican-keystone-listener/0.log" Feb 18 02:03:41 crc kubenswrapper[4791]: I0218 02:03:41.175583 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-52t8v" podUID="97f48b91-2087-41cd-902d-75da9fa5ea8c" containerName="registry-server" containerID="cri-o://b7eff96e24e9dc0ae8532eccb2d78fcd11c5e2fe83a0f36ff2304fd45a9597a9" gracePeriod=2 Feb 18 02:03:41 crc kubenswrapper[4791]: I0218 02:03:41.252615 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-54d8d8c67d-vxfgq_2c364d42-b757-4c65-a010-5db856347830/barbican-keystone-listener-log/0.log" Feb 18 02:03:41 crc kubenswrapper[4791]: I0218 02:03:41.318945 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-c544fb58c-t65m4_db6663e2-3aa9-4ddc-8e0a-e4647fccd511/barbican-worker/0.log" Feb 18 02:03:41 crc kubenswrapper[4791]: I0218 02:03:41.369304 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-c544fb58c-t65m4_db6663e2-3aa9-4ddc-8e0a-e4647fccd511/barbican-worker-log/0.log" Feb 18 02:03:41 crc kubenswrapper[4791]: I0218 02:03:41.618830 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-fgcg2_21e731fb-b216-48ca-b351-c1b511ed7617/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Feb 18 02:03:42 crc kubenswrapper[4791]: I0218 02:03:42.189113 4791 generic.go:334] "Generic (PLEG): container finished" podID="97f48b91-2087-41cd-902d-75da9fa5ea8c" containerID="b7eff96e24e9dc0ae8532eccb2d78fcd11c5e2fe83a0f36ff2304fd45a9597a9" exitCode=0 Feb 18 02:03:42 crc kubenswrapper[4791]: I0218 02:03:42.189144 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-52t8v" event={"ID":"97f48b91-2087-41cd-902d-75da9fa5ea8c","Type":"ContainerDied","Data":"b7eff96e24e9dc0ae8532eccb2d78fcd11c5e2fe83a0f36ff2304fd45a9597a9"} Feb 18 02:03:42 crc kubenswrapper[4791]: I0218 02:03:42.189437 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-52t8v" event={"ID":"97f48b91-2087-41cd-902d-75da9fa5ea8c","Type":"ContainerDied","Data":"3041455112b6ebe7fd0bce0a2e13d6090b26c63a08ca00553996594f03bb6b5c"} Feb 18 02:03:42 crc kubenswrapper[4791]: I0218 02:03:42.189449 4791 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3041455112b6ebe7fd0bce0a2e13d6090b26c63a08ca00553996594f03bb6b5c" Feb 18 02:03:42 crc kubenswrapper[4791]: I0218 02:03:42.215866 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-52t8v" Feb 18 02:03:42 crc kubenswrapper[4791]: I0218 02:03:42.321672 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/97f48b91-2087-41cd-902d-75da9fa5ea8c-catalog-content\") pod \"97f48b91-2087-41cd-902d-75da9fa5ea8c\" (UID: \"97f48b91-2087-41cd-902d-75da9fa5ea8c\") " Feb 18 02:03:42 crc kubenswrapper[4791]: I0218 02:03:42.321721 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-66ppf\" (UniqueName: \"kubernetes.io/projected/97f48b91-2087-41cd-902d-75da9fa5ea8c-kube-api-access-66ppf\") pod \"97f48b91-2087-41cd-902d-75da9fa5ea8c\" (UID: \"97f48b91-2087-41cd-902d-75da9fa5ea8c\") " Feb 18 02:03:42 crc kubenswrapper[4791]: I0218 02:03:42.321951 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/97f48b91-2087-41cd-902d-75da9fa5ea8c-utilities\") pod \"97f48b91-2087-41cd-902d-75da9fa5ea8c\" (UID: \"97f48b91-2087-41cd-902d-75da9fa5ea8c\") " Feb 18 02:03:42 crc kubenswrapper[4791]: I0218 02:03:42.322805 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_4b9cec47-aeda-40f0-b83e-46f09ce65e95/ceilometer-notification-agent/0.log" Feb 18 02:03:42 crc kubenswrapper[4791]: I0218 02:03:42.323291 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/97f48b91-2087-41cd-902d-75da9fa5ea8c-utilities" (OuterVolumeSpecName: "utilities") pod "97f48b91-2087-41cd-902d-75da9fa5ea8c" (UID: "97f48b91-2087-41cd-902d-75da9fa5ea8c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 02:03:42 crc kubenswrapper[4791]: I0218 02:03:42.335413 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97f48b91-2087-41cd-902d-75da9fa5ea8c-kube-api-access-66ppf" (OuterVolumeSpecName: "kube-api-access-66ppf") pod "97f48b91-2087-41cd-902d-75da9fa5ea8c" (UID: "97f48b91-2087-41cd-902d-75da9fa5ea8c"). InnerVolumeSpecName "kube-api-access-66ppf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 02:03:42 crc kubenswrapper[4791]: I0218 02:03:42.371735 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_4b9cec47-aeda-40f0-b83e-46f09ce65e95/proxy-httpd/0.log" Feb 18 02:03:42 crc kubenswrapper[4791]: I0218 02:03:42.393666 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_4b9cec47-aeda-40f0-b83e-46f09ce65e95/sg-core/0.log" Feb 18 02:03:42 crc kubenswrapper[4791]: I0218 02:03:42.399915 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/97f48b91-2087-41cd-902d-75da9fa5ea8c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "97f48b91-2087-41cd-902d-75da9fa5ea8c" (UID: "97f48b91-2087-41cd-902d-75da9fa5ea8c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 02:03:42 crc kubenswrapper[4791]: I0218 02:03:42.424543 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/97f48b91-2087-41cd-902d-75da9fa5ea8c-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 02:03:42 crc kubenswrapper[4791]: I0218 02:03:42.424577 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-66ppf\" (UniqueName: \"kubernetes.io/projected/97f48b91-2087-41cd-902d-75da9fa5ea8c-kube-api-access-66ppf\") on node \"crc\" DevicePath \"\"" Feb 18 02:03:42 crc kubenswrapper[4791]: I0218 02:03:42.424589 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/97f48b91-2087-41cd-902d-75da9fa5ea8c-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 02:03:42 crc kubenswrapper[4791]: I0218 02:03:42.627575 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_6bfc0893-2e4f-43da-b675-15687e8a3436/cinder-api/0.log" Feb 18 02:03:42 crc kubenswrapper[4791]: I0218 02:03:42.692785 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_5983f269-c1da-4b3d-90dd-083fa90022eb/cinder-scheduler/0.log" Feb 18 02:03:42 crc kubenswrapper[4791]: I0218 02:03:42.709840 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_6bfc0893-2e4f-43da-b675-15687e8a3436/cinder-api-log/0.log" Feb 18 02:03:42 crc kubenswrapper[4791]: I0218 02:03:42.912986 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_5983f269-c1da-4b3d-90dd-083fa90022eb/probe/0.log" Feb 18 02:03:42 crc kubenswrapper[4791]: I0218 02:03:42.941738 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-bb85b8995-fgcpw_9d4d756f-d3c4-4fd5-a75e-0df5c33004fb/init/0.log" Feb 18 02:03:43 crc kubenswrapper[4791]: I0218 02:03:43.198858 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-52t8v" Feb 18 02:03:43 crc kubenswrapper[4791]: I0218 02:03:43.227406 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-52t8v"] Feb 18 02:03:43 crc kubenswrapper[4791]: I0218 02:03:43.237334 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-52t8v"] Feb 18 02:03:43 crc kubenswrapper[4791]: I0218 02:03:43.241099 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-bb85b8995-fgcpw_9d4d756f-d3c4-4fd5-a75e-0df5c33004fb/init/0.log" Feb 18 02:03:43 crc kubenswrapper[4791]: I0218 02:03:43.242124 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-bb85b8995-fgcpw_9d4d756f-d3c4-4fd5-a75e-0df5c33004fb/dnsmasq-dns/0.log" Feb 18 02:03:43 crc kubenswrapper[4791]: I0218 02:03:43.244723 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-55w62_6fbf1afd-08c9-4fb1-87b9-816b3846145b/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Feb 18 02:03:43 crc kubenswrapper[4791]: I0218 02:03:43.440769 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-68jq5_4f1c775a-693d-40ae-b01c-00632b39e8b1/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Feb 18 02:03:43 crc kubenswrapper[4791]: I0218 02:03:43.507628 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-gpsbw_43c4470a-baeb-43d6-bb3e-ff571be8c778/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Feb 18 02:03:43 crc kubenswrapper[4791]: I0218 02:03:43.742293 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-jgpvr_80cb92f6-d3a1-44dc-96de-ce408815087a/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Feb 18 02:03:43 crc kubenswrapper[4791]: I0218 02:03:43.750570 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-pvgl7_8bc0f1f9-4a48-410f-8911-d599f1fcdb84/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Feb 18 02:03:43 crc kubenswrapper[4791]: I0218 02:03:43.966486 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-rd8cm_bec10b7f-b30a-4c42-828c-2d3d86635d33/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Feb 18 02:03:44 crc kubenswrapper[4791]: I0218 02:03:44.017621 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-vs565_2bee1000-5b84-4271-9e51-adb4f12eaadb/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Feb 18 02:03:44 crc kubenswrapper[4791]: I0218 02:03:44.245884 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_04c2d5ce-ba0d-4b53-8396-ee56f79b1c81/glance-httpd/0.log" Feb 18 02:03:44 crc kubenswrapper[4791]: I0218 02:03:44.283549 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_04c2d5ce-ba0d-4b53-8396-ee56f79b1c81/glance-log/0.log" Feb 18 02:03:44 crc kubenswrapper[4791]: I0218 02:03:44.665376 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_5a7ad9d2-ee92-4248-be3d-e687312e819f/glance-httpd/0.log" 
Feb 18 02:03:44 crc kubenswrapper[4791]: I0218 02:03:44.743971 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_5a7ad9d2-ee92-4248-be3d-e687312e819f/glance-log/0.log" Feb 18 02:03:45 crc kubenswrapper[4791]: I0218 02:03:45.076229 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="97f48b91-2087-41cd-902d-75da9fa5ea8c" path="/var/lib/kubelet/pods/97f48b91-2087-41cd-902d-75da9fa5ea8c/volumes" Feb 18 02:03:45 crc kubenswrapper[4791]: I0218 02:03:45.359309 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-api-7d9d7f9648-qr5lg_50df18fd-8515-4e5b-a699-98930a83e9a7/heat-api/0.log" Feb 18 02:03:45 crc kubenswrapper[4791]: I0218 02:03:45.370311 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-cfnapi-7b5c5fc9b-vzf74_a103b173-b84f-4c1d-bf8f-bf278b570051/heat-cfnapi/0.log" Feb 18 02:03:45 crc kubenswrapper[4791]: I0218 02:03:45.404915 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-engine-b4df76d98-82cxf_c31267c4-b30a-478e-b67a-0231013883df/heat-engine/0.log" Feb 18 02:03:45 crc kubenswrapper[4791]: I0218 02:03:45.604002 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-85bf744856-fnxxz_ecce6854-dfe6-4480-9248-190d2eacff79/keystone-api/0.log" Feb 18 02:03:45 crc kubenswrapper[4791]: I0218 02:03:45.605843 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29522941-j7h5m_7335be95-38b6-4ee3-9e54-ec94854cda08/keystone-cron/0.log" Feb 18 02:03:45 crc kubenswrapper[4791]: I0218 02:03:45.645594 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29523001-hb5h6_bebf6739-bc0b-4a35-bcc8-4197efe64e1d/keystone-cron/0.log" Feb 18 02:03:45 crc kubenswrapper[4791]: I0218 02:03:45.778613 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_23f7febf-286b-49f0-a4db-9aada2d4a4d7/kube-state-metrics/0.log" Feb 18 02:03:46 crc kubenswrapper[4791]: E0218 02:03:46.065819 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:03:46 crc kubenswrapper[4791]: E0218 02:03:46.066332 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:03:46 crc kubenswrapper[4791]: I0218 02:03:46.066826 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mysqld-exporter-0_e0395872-356e-4d2f-ba73-bd4d2feed605/mysqld-exporter/0.log" Feb 18 02:03:46 crc kubenswrapper[4791]: I0218 02:03:46.190626 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-8475878fc-kbpdv_62acb3be-36b9-469a-9714-5e29539324dc/neutron-api/0.log" Feb 18 02:03:46 crc kubenswrapper[4791]: I0218 02:03:46.206577 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-8475878fc-kbpdv_62acb3be-36b9-469a-9714-5e29539324dc/neutron-httpd/0.log" Feb 18 02:03:46 crc kubenswrapper[4791]: I0218 02:03:46.543101 4791 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_c00433e0-9dce-47aa-a1b5-f4876de61b2e/nova-api-log/0.log" Feb 18 02:03:46 crc kubenswrapper[4791]: I0218 02:03:46.722832 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_352e15b6-33bb-4c81-9ace-0450f934eec1/nova-cell0-conductor-conductor/0.log" Feb 18 02:03:46 crc kubenswrapper[4791]: I0218 02:03:46.958639 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_c00433e0-9dce-47aa-a1b5-f4876de61b2e/nova-api-api/0.log" Feb 18 02:03:47 crc kubenswrapper[4791]: I0218 02:03:47.002642 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_686dcc60-b95f-43ca-bdb2-5045f8289bec/nova-cell1-conductor-conductor/0.log" Feb 18 02:03:47 crc kubenswrapper[4791]: I0218 02:03:47.060941 4791 scope.go:117] "RemoveContainer" containerID="c910b6cc87a9dcc9eddfba49c0ce2c47da01bea9873b2e6847f3f6b997395cc8" Feb 18 02:03:47 crc kubenswrapper[4791]: E0218 02:03:47.061342 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 02:03:47 crc kubenswrapper[4791]: I0218 02:03:47.278110 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_87cb2d4e-852a-45d2-b7e1-b6fd1811b6e5/nova-cell1-novncproxy-novncproxy/0.log" Feb 18 02:03:47 crc kubenswrapper[4791]: I0218 02:03:47.318966 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_17b7a426-4dd0-4349-999d-1f323e337be0/nova-metadata-log/0.log" Feb 18 02:03:47 crc kubenswrapper[4791]: I0218 02:03:47.647607 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_8f5fd517-5092-4db3-9a3f-04fd12f9829a/nova-scheduler-scheduler/0.log" Feb 18 02:03:47 crc kubenswrapper[4791]: I0218 02:03:47.688704 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_af0c20ed-84d0-4674-8664-1de72a190f84/mysql-bootstrap/0.log" Feb 18 02:03:47 crc kubenswrapper[4791]: I0218 02:03:47.946996 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_af0c20ed-84d0-4674-8664-1de72a190f84/mysql-bootstrap/0.log" Feb 18 02:03:48 crc kubenswrapper[4791]: I0218 02:03:48.008302 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_af0c20ed-84d0-4674-8664-1de72a190f84/galera/0.log" Feb 18 02:03:48 crc kubenswrapper[4791]: I0218 02:03:48.228035 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_f9e1b835-c4bf-4722-b4f5-512b3439fdfb/mysql-bootstrap/0.log" Feb 18 02:03:48 crc kubenswrapper[4791]: I0218 02:03:48.344811 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_f9e1b835-c4bf-4722-b4f5-512b3439fdfb/mysql-bootstrap/0.log" Feb 18 02:03:48 crc kubenswrapper[4791]: I0218 02:03:48.376968 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_f9e1b835-c4bf-4722-b4f5-512b3439fdfb/galera/0.log" Feb 18 02:03:48 crc kubenswrapper[4791]: I0218 02:03:48.547147 4791 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_openstackclient_e10a79e5-3fc8-492a-9551-ee6cd80c2f83/openstackclient/0.log" Feb 18 02:03:48 crc kubenswrapper[4791]: I0218 02:03:48.683490 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-5shlq_54deb01e-caa1-4fe0-8bd0-d412c4d73210/ovn-controller/0.log" Feb 18 02:03:48 crc kubenswrapper[4791]: I0218 02:03:48.907534 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-l7hr6_b959be4d-fe96-46d6-bf53-c76a3fbd2647/openstack-network-exporter/0.log" Feb 18 02:03:49 crc kubenswrapper[4791]: I0218 02:03:49.060566 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-l4g2x_8ac723a9-6515-4a69-aca1-95e459bf2047/ovsdb-server-init/0.log" Feb 18 02:03:49 crc kubenswrapper[4791]: I0218 02:03:49.293855 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-l4g2x_8ac723a9-6515-4a69-aca1-95e459bf2047/ovsdb-server-init/0.log" Feb 18 02:03:49 crc kubenswrapper[4791]: I0218 02:03:49.318445 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-l4g2x_8ac723a9-6515-4a69-aca1-95e459bf2047/ovs-vswitchd/0.log" Feb 18 02:03:49 crc kubenswrapper[4791]: I0218 02:03:49.345126 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-l4g2x_8ac723a9-6515-4a69-aca1-95e459bf2047/ovsdb-server/0.log" Feb 18 02:03:49 crc kubenswrapper[4791]: I0218 02:03:49.381318 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_17b7a426-4dd0-4349-999d-1f323e337be0/nova-metadata-metadata/0.log" Feb 18 02:03:49 crc kubenswrapper[4791]: I0218 02:03:49.541364 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_d5580709-c614-4e19-a6c2-58f2ea044e0e/openstack-network-exporter/0.log" Feb 18 02:03:49 crc kubenswrapper[4791]: I0218 02:03:49.570783 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_d5580709-c614-4e19-a6c2-58f2ea044e0e/ovn-northd/0.log" Feb 18 02:03:50 crc kubenswrapper[4791]: I0218 02:03:50.264572 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_45f7687b-ba74-41f7-bb24-e34698bb35c4/ovsdbserver-nb/0.log" Feb 18 02:03:50 crc kubenswrapper[4791]: I0218 02:03:50.302980 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_ebbcd631-eef9-4249-a5a0-7aeef10d5d4e/openstack-network-exporter/0.log" Feb 18 02:03:50 crc kubenswrapper[4791]: I0218 02:03:50.316523 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_45f7687b-ba74-41f7-bb24-e34698bb35c4/openstack-network-exporter/0.log" Feb 18 02:03:50 crc kubenswrapper[4791]: I0218 02:03:50.529402 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_ebbcd631-eef9-4249-a5a0-7aeef10d5d4e/ovsdbserver-sb/0.log" Feb 18 02:03:50 crc kubenswrapper[4791]: I0218 02:03:50.595708 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-568cb6f944-cxt84_58b78dc2-71f8-4009-b3b0-f3db0fee55c4/placement-api/0.log" Feb 18 02:03:50 crc kubenswrapper[4791]: I0218 02:03:50.750406 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-568cb6f944-cxt84_58b78dc2-71f8-4009-b3b0-f3db0fee55c4/placement-log/0.log" Feb 18 02:03:50 crc kubenswrapper[4791]: I0218 02:03:50.755449 4791 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_prometheus-metric-storage-0_6658e3ff-2b3b-476c-8638-a5b3d94005d4/init-config-reloader/0.log" Feb 18 02:03:50 crc kubenswrapper[4791]: I0218 02:03:50.977483 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_6658e3ff-2b3b-476c-8638-a5b3d94005d4/prometheus/0.log" Feb 18 02:03:50 crc kubenswrapper[4791]: I0218 02:03:50.993821 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_6658e3ff-2b3b-476c-8638-a5b3d94005d4/init-config-reloader/0.log" Feb 18 02:03:51 crc kubenswrapper[4791]: I0218 02:03:51.002898 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_6658e3ff-2b3b-476c-8638-a5b3d94005d4/config-reloader/0.log" Feb 18 02:03:51 crc kubenswrapper[4791]: I0218 02:03:51.078862 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_6658e3ff-2b3b-476c-8638-a5b3d94005d4/thanos-sidecar/0.log" Feb 18 02:03:51 crc kubenswrapper[4791]: I0218 02:03:51.202228 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_96310c28-c67e-463a-ab1e-beb273a7434e/setup-container/0.log" Feb 18 02:03:51 crc kubenswrapper[4791]: I0218 02:03:51.440832 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_96310c28-c67e-463a-ab1e-beb273a7434e/setup-container/0.log" Feb 18 02:03:51 crc kubenswrapper[4791]: I0218 02:03:51.450727 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_516b6fcb-95c5-4c07-80b8-e1904970035b/setup-container/0.log" Feb 18 02:03:51 crc kubenswrapper[4791]: I0218 02:03:51.488062 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_96310c28-c67e-463a-ab1e-beb273a7434e/rabbitmq/0.log" Feb 18 02:03:52 crc kubenswrapper[4791]: I0218 02:03:52.092200 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-1_e685bb75-4a24-4b83-8385-e6f5bdc9a526/setup-container/0.log" Feb 18 02:03:52 crc kubenswrapper[4791]: I0218 02:03:52.145639 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_516b6fcb-95c5-4c07-80b8-e1904970035b/setup-container/0.log" Feb 18 02:03:52 crc kubenswrapper[4791]: I0218 02:03:52.160463 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_516b6fcb-95c5-4c07-80b8-e1904970035b/rabbitmq/0.log" Feb 18 02:03:52 crc kubenswrapper[4791]: I0218 02:03:52.401103 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-1_e685bb75-4a24-4b83-8385-e6f5bdc9a526/rabbitmq/0.log" Feb 18 02:03:52 crc kubenswrapper[4791]: I0218 02:03:52.436236 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-2_37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d/setup-container/0.log" Feb 18 02:03:52 crc kubenswrapper[4791]: I0218 02:03:52.466597 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-1_e685bb75-4a24-4b83-8385-e6f5bdc9a526/setup-container/0.log" Feb 18 02:03:52 crc kubenswrapper[4791]: I0218 02:03:52.673323 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-2_37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d/setup-container/0.log" Feb 18 02:03:52 crc kubenswrapper[4791]: I0218 02:03:52.729177 4791 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_rabbitmq-server-2_37f2d008-97f2-4a1c-a9d9-8fa1df55cd2d/rabbitmq/0.log" Feb 18 02:03:52 crc kubenswrapper[4791]: I0218 02:03:52.767418 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-wzr92_52c76fd4-a538-4f07-ad60-4c3beb4490f7/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Feb 18 02:03:52 crc kubenswrapper[4791]: I0218 02:03:52.933764 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-4x5dt_25084610-eb17-4a22-bb76-3b67b38e4402/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Feb 18 02:03:53 crc kubenswrapper[4791]: I0218 02:03:53.209708 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-5c6c54c7f5-xvkc2_f0488233-4091-4798-b96e-194d46245d44/proxy-server/0.log" Feb 18 02:03:53 crc kubenswrapper[4791]: I0218 02:03:53.224497 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-xlbf7_ddbbd37f-70d8-40d3-9f79-2c8172c4d589/swift-ring-rebalance/0.log" Feb 18 02:03:53 crc kubenswrapper[4791]: I0218 02:03:53.235402 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-5c6c54c7f5-xvkc2_f0488233-4091-4798-b96e-194d46245d44/proxy-httpd/0.log" Feb 18 02:03:53 crc kubenswrapper[4791]: I0218 02:03:53.677367 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_569159b6-791b-428c-84c7-5387c17a731b/account-reaper/0.log" Feb 18 02:03:53 crc kubenswrapper[4791]: I0218 02:03:53.702148 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_569159b6-791b-428c-84c7-5387c17a731b/account-auditor/0.log" Feb 18 02:03:53 crc kubenswrapper[4791]: I0218 02:03:53.731720 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_569159b6-791b-428c-84c7-5387c17a731b/account-replicator/0.log" Feb 18 02:03:53 crc kubenswrapper[4791]: I0218 02:03:53.833168 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_569159b6-791b-428c-84c7-5387c17a731b/account-server/0.log" Feb 18 02:03:53 crc kubenswrapper[4791]: I0218 02:03:53.890234 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_569159b6-791b-428c-84c7-5387c17a731b/container-auditor/0.log" Feb 18 02:03:53 crc kubenswrapper[4791]: I0218 02:03:53.949003 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_569159b6-791b-428c-84c7-5387c17a731b/container-replicator/0.log" Feb 18 02:03:53 crc kubenswrapper[4791]: I0218 02:03:53.962517 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_569159b6-791b-428c-84c7-5387c17a731b/container-server/0.log" Feb 18 02:03:54 crc kubenswrapper[4791]: I0218 02:03:54.052072 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_569159b6-791b-428c-84c7-5387c17a731b/container-updater/0.log" Feb 18 02:03:54 crc kubenswrapper[4791]: I0218 02:03:54.203034 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_569159b6-791b-428c-84c7-5387c17a731b/object-auditor/0.log" Feb 18 02:03:54 crc kubenswrapper[4791]: I0218 02:03:54.214720 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_569159b6-791b-428c-84c7-5387c17a731b/object-expirer/0.log" Feb 18 02:03:54 crc kubenswrapper[4791]: I0218 02:03:54.222575 4791 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_569159b6-791b-428c-84c7-5387c17a731b/object-replicator/0.log" Feb 18 02:03:54 crc kubenswrapper[4791]: I0218 02:03:54.312575 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_569159b6-791b-428c-84c7-5387c17a731b/object-server/0.log" Feb 18 02:03:54 crc kubenswrapper[4791]: I0218 02:03:54.436404 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_569159b6-791b-428c-84c7-5387c17a731b/object-updater/0.log" Feb 18 02:03:54 crc kubenswrapper[4791]: I0218 02:03:54.449395 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_569159b6-791b-428c-84c7-5387c17a731b/rsync/0.log" Feb 18 02:03:54 crc kubenswrapper[4791]: I0218 02:03:54.467866 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_569159b6-791b-428c-84c7-5387c17a731b/swift-recon-cron/0.log" Feb 18 02:03:59 crc kubenswrapper[4791]: I0218 02:03:59.722563 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_e3e07d33-a963-4a49-b9f4-eb2b867eae6a/memcached/0.log" Feb 18 02:04:00 crc kubenswrapper[4791]: I0218 02:04:00.060993 4791 scope.go:117] "RemoveContainer" containerID="c910b6cc87a9dcc9eddfba49c0ce2c47da01bea9873b2e6847f3f6b997395cc8" Feb 18 02:04:00 crc kubenswrapper[4791]: E0218 02:04:00.061564 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 02:04:00 crc kubenswrapper[4791]: E0218 02:04:00.063034 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:04:01 crc kubenswrapper[4791]: E0218 02:04:01.064209 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:04:13 crc kubenswrapper[4791]: E0218 02:04:13.064975 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:04:14 crc kubenswrapper[4791]: E0218 02:04:14.063394 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:04:15 crc kubenswrapper[4791]: I0218 02:04:15.061641 4791 scope.go:117] 
"RemoveContainer" containerID="c910b6cc87a9dcc9eddfba49c0ce2c47da01bea9873b2e6847f3f6b997395cc8" Feb 18 02:04:15 crc kubenswrapper[4791]: E0218 02:04:15.061932 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 02:04:25 crc kubenswrapper[4791]: E0218 02:04:25.063872 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:04:25 crc kubenswrapper[4791]: I0218 02:04:25.559941 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_839821d02b67fa352b5f2f2742cf71374a58067197cd468c715f3fd4e7c76qd_b8c638bd-8bce-4fc7-9289-312eee3c2be4/util/0.log" Feb 18 02:04:25 crc kubenswrapper[4791]: I0218 02:04:25.720479 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_839821d02b67fa352b5f2f2742cf71374a58067197cd468c715f3fd4e7c76qd_b8c638bd-8bce-4fc7-9289-312eee3c2be4/pull/0.log" Feb 18 02:04:25 crc kubenswrapper[4791]: I0218 02:04:25.766921 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_839821d02b67fa352b5f2f2742cf71374a58067197cd468c715f3fd4e7c76qd_b8c638bd-8bce-4fc7-9289-312eee3c2be4/pull/0.log" Feb 18 02:04:25 crc kubenswrapper[4791]: I0218 02:04:25.781839 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_839821d02b67fa352b5f2f2742cf71374a58067197cd468c715f3fd4e7c76qd_b8c638bd-8bce-4fc7-9289-312eee3c2be4/util/0.log" Feb 18 02:04:25 crc kubenswrapper[4791]: I0218 02:04:25.975725 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_839821d02b67fa352b5f2f2742cf71374a58067197cd468c715f3fd4e7c76qd_b8c638bd-8bce-4fc7-9289-312eee3c2be4/pull/0.log" Feb 18 02:04:25 crc kubenswrapper[4791]: I0218 02:04:25.999234 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_839821d02b67fa352b5f2f2742cf71374a58067197cd468c715f3fd4e7c76qd_b8c638bd-8bce-4fc7-9289-312eee3c2be4/extract/0.log" Feb 18 02:04:26 crc kubenswrapper[4791]: I0218 02:04:26.013871 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_839821d02b67fa352b5f2f2742cf71374a58067197cd468c715f3fd4e7c76qd_b8c638bd-8bce-4fc7-9289-312eee3c2be4/util/0.log" Feb 18 02:04:26 crc kubenswrapper[4791]: I0218 02:04:26.448864 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-6d8bf5c495-nzmq6_e8318b6f-bf0c-447f-a43c-0ac54f9c60a4/manager/0.log" Feb 18 02:04:26 crc kubenswrapper[4791]: I0218 02:04:26.811999 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-77987464f4-k2x8h_b9ce1944-2b04-437c-9b3e-4cafb6d68ecf/manager/0.log" Feb 18 02:04:27 crc kubenswrapper[4791]: I0218 02:04:27.062231 4791 scope.go:117] "RemoveContainer" containerID="c910b6cc87a9dcc9eddfba49c0ce2c47da01bea9873b2e6847f3f6b997395cc8" Feb 18 02:04:27 crc kubenswrapper[4791]: E0218 
02:04:27.062539 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 02:04:27 crc kubenswrapper[4791]: I0218 02:04:27.148131 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-69f49c598c-dhrvm_2f773ffa-c1bb-4096-b44d-b01e7d9158c3/manager/0.log" Feb 18 02:04:27 crc kubenswrapper[4791]: I0218 02:04:27.253324 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5b9b8895d5-nq4q5_cd9b7e04-15d6-4e16-9e30-85bbd31605fa/manager/0.log" Feb 18 02:04:28 crc kubenswrapper[4791]: I0218 02:04:28.135809 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-554564d7fc-265gr_e029392b-011d-4edb-84bf-851fb6e9828f/manager/0.log" Feb 18 02:04:28 crc kubenswrapper[4791]: I0218 02:04:28.311638 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-79d975b745-4xrps_c0f7fb62-ebcb-4989-8913-8b4b488df740/manager/0.log" Feb 18 02:04:28 crc kubenswrapper[4791]: I0218 02:04:28.694802 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-b4d948c87-jqbhr_4e11abdf-c683-4f45-a448-dcdfadbd9731/manager/0.log" Feb 18 02:04:28 crc kubenswrapper[4791]: I0218 02:04:28.856888 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-54f6768c69-mw585_7ebed3cd-6f5f-4d8b-9b8c-d857c9ca0177/manager/0.log" Feb 18 02:04:29 crc kubenswrapper[4791]: E0218 02:04:29.084135 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:04:29 crc kubenswrapper[4791]: I0218 02:04:29.146338 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-6994f66f48-9sxdc_af444709-16b7-4e86-bf27-b0f4bcbd07d6/manager/0.log" Feb 18 02:04:29 crc kubenswrapper[4791]: I0218 02:04:29.220464 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-5d946d989d-pbsxj_e10a708b-16cd-467f-b166-00429da94123/manager/0.log" Feb 18 02:04:29 crc kubenswrapper[4791]: I0218 02:04:29.443228 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-64ddbf8bb-9xxkb_0864bcf0-8b89-4a98-b294-cac0ec858221/manager/0.log" Feb 18 02:04:29 crc kubenswrapper[4791]: I0218 02:04:29.574329 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-567668f5cf-2klv9_f1db1f83-f5b1-4c8d-af64-92816b1aa96d/manager/0.log" Feb 18 02:04:29 crc kubenswrapper[4791]: I0218 02:04:29.865273 4791 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-7c6767dc9cj5nmp_be246d6d-b866-4edb-bb80-3c84e27f0caa/manager/0.log" Feb 18 02:04:30 crc kubenswrapper[4791]: I0218 02:04:30.386772 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-init-69ff8ccd5-7nmcp_acd3a85c-bdae-4262-a8a0-3da693230a86/operator/0.log" Feb 18 02:04:31 crc kubenswrapper[4791]: I0218 02:04:31.591943 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-hccl8_38a12dc2-60bc-48f8-9597-b5e899ab2971/registry-server/0.log" Feb 18 02:04:32 crc kubenswrapper[4791]: I0218 02:04:32.084895 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-d44cf6b75-97ckn_2c9de6b9-8cd5-4082-b89e-88958f7cb27e/manager/0.log" Feb 18 02:04:32 crc kubenswrapper[4791]: I0218 02:04:32.332518 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-8497b45c89-47vp7_35eeab77-8059-4f0c-8742-3f72c2ffab54/manager/0.log" Feb 18 02:04:32 crc kubenswrapper[4791]: I0218 02:04:32.604352 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-wl9hf_692a8f45-9351-4d64-9571-20f46a3bd0ba/operator/0.log" Feb 18 02:04:32 crc kubenswrapper[4791]: I0218 02:04:32.784587 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-68f46476f-5tvb7_a3e4d5ea-ce2c-4d6b-93bd-f67a88f996a6/manager/0.log" Feb 18 02:04:33 crc kubenswrapper[4791]: I0218 02:04:33.251851 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-7866795846-bx4hv_819eaf55-1011-4861-bca0-7aecb14098c3/manager/0.log" Feb 18 02:04:33 crc kubenswrapper[4791]: I0218 02:04:33.345209 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-669759659c-xt6gk_0d602aa9-6246-4b05-8a6c-7d3cfb607a36/manager/0.log" Feb 18 02:04:33 crc kubenswrapper[4791]: I0218 02:04:33.593930 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-5db88f68c-sjggr_38f179b0-7f33-4d39-aa94-2ca4292ac94e/manager/0.log" Feb 18 02:04:33 crc kubenswrapper[4791]: I0218 02:04:33.627544 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-c6f9cb8b-mnkq9_ea2ae9de-8373-4a24-bbe8-2308ecc8dad2/manager/0.log" Feb 18 02:04:34 crc kubenswrapper[4791]: I0218 02:04:34.084326 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-69f8888797-gz2td_be30d6a9-7cd4-482d-a12d-e21de55366c1/manager/0.log" Feb 18 02:04:40 crc kubenswrapper[4791]: I0218 02:04:40.060931 4791 scope.go:117] "RemoveContainer" containerID="c910b6cc87a9dcc9eddfba49c0ce2c47da01bea9873b2e6847f3f6b997395cc8" Feb 18 02:04:40 crc kubenswrapper[4791]: E0218 02:04:40.061698 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" 
podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 02:04:40 crc kubenswrapper[4791]: E0218 02:04:40.064623 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:04:40 crc kubenswrapper[4791]: I0218 02:04:40.322864 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-868647ff47-kcd8v_189a189d-5801-4406-8450-1cff37f84bbb/manager/0.log" Feb 18 02:04:41 crc kubenswrapper[4791]: E0218 02:04:41.063577 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:04:51 crc kubenswrapper[4791]: I0218 02:04:51.061719 4791 scope.go:117] "RemoveContainer" containerID="c910b6cc87a9dcc9eddfba49c0ce2c47da01bea9873b2e6847f3f6b997395cc8" Feb 18 02:04:51 crc kubenswrapper[4791]: E0218 02:04:51.062574 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 02:04:52 crc kubenswrapper[4791]: E0218 02:04:52.064266 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:04:53 crc kubenswrapper[4791]: E0218 02:04:53.063518 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:04:58 crc kubenswrapper[4791]: I0218 02:04:58.326227 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-82c2d_a4f1858f-9974-4098-a264-b981c587623b/control-plane-machine-set-operator/0.log" Feb 18 02:04:58 crc kubenswrapper[4791]: I0218 02:04:58.544783 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-rwmnp_f5c71b3e-957e-466a-97c6-b114ee0eea13/machine-api-operator/0.log" Feb 18 02:04:58 crc kubenswrapper[4791]: I0218 02:04:58.595863 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-rwmnp_f5c71b3e-957e-466a-97c6-b114ee0eea13/kube-rbac-proxy/0.log" Feb 18 02:05:03 crc kubenswrapper[4791]: I0218 02:05:03.061611 4791 scope.go:117] "RemoveContainer" containerID="c910b6cc87a9dcc9eddfba49c0ce2c47da01bea9873b2e6847f3f6b997395cc8" Feb 18 02:05:03 
crc kubenswrapper[4791]: E0218 02:05:03.062476 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 02:05:05 crc kubenswrapper[4791]: E0218 02:05:05.063329 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:05:05 crc kubenswrapper[4791]: E0218 02:05:05.063770 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:05:14 crc kubenswrapper[4791]: I0218 02:05:14.180254 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-858654f9db-pt89z_d18c7aea-395c-4dc2-a8be-885128a49c08/cert-manager-controller/0.log" Feb 18 02:05:14 crc kubenswrapper[4791]: I0218 02:05:14.370314 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-cf98fcc89-8fnj2_7dfbc107-3119-4d6c-94a7-f968eadcfa14/cert-manager-cainjector/0.log" Feb 18 02:05:14 crc kubenswrapper[4791]: I0218 02:05:14.411421 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-687f57d79b-hk8hj_290b1843-b2ad-42d9-ba7e-0c58aa50cc00/cert-manager-webhook/0.log" Feb 18 02:05:16 crc kubenswrapper[4791]: E0218 02:05:16.064027 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:05:17 crc kubenswrapper[4791]: I0218 02:05:17.061665 4791 scope.go:117] "RemoveContainer" containerID="c910b6cc87a9dcc9eddfba49c0ce2c47da01bea9873b2e6847f3f6b997395cc8" Feb 18 02:05:17 crc kubenswrapper[4791]: E0218 02:05:17.061986 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 02:05:19 crc kubenswrapper[4791]: E0218 02:05:19.073529 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:05:27 crc kubenswrapper[4791]: E0218 02:05:27.064524 
4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:05:28 crc kubenswrapper[4791]: I0218 02:05:28.392938 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5c78fc5d65-56dx7_0f6d4b29-736a-4142-92e2-6ce0f0066a66/nmstate-console-plugin/0.log" Feb 18 02:05:28 crc kubenswrapper[4791]: I0218 02:05:28.596428 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-b79fm_e2e2c268-5347-4124-9bd5-276f07431108/nmstate-handler/0.log" Feb 18 02:05:28 crc kubenswrapper[4791]: I0218 02:05:28.658885 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-58c85c668d-9t458_28664980-fdb0-453b-be75-c2d6c758c97e/kube-rbac-proxy/0.log" Feb 18 02:05:28 crc kubenswrapper[4791]: I0218 02:05:28.724800 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-58c85c668d-9t458_28664980-fdb0-453b-be75-c2d6c758c97e/nmstate-metrics/0.log" Feb 18 02:05:28 crc kubenswrapper[4791]: I0218 02:05:28.814548 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-694c9596b7-s67t6_3ed4fbe3-f078-4fd2-91fd-ba4b154c34c5/nmstate-operator/0.log" Feb 18 02:05:28 crc kubenswrapper[4791]: I0218 02:05:28.925846 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-866bcb46dc-9zxzm_d73fe398-974d-41d9-97e7-78287a263d67/nmstate-webhook/0.log" Feb 18 02:05:30 crc kubenswrapper[4791]: E0218 02:05:30.086107 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:05:31 crc kubenswrapper[4791]: I0218 02:05:31.062831 4791 scope.go:117] "RemoveContainer" containerID="c910b6cc87a9dcc9eddfba49c0ce2c47da01bea9873b2e6847f3f6b997395cc8" Feb 18 02:05:31 crc kubenswrapper[4791]: E0218 02:05:31.063345 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 02:05:41 crc kubenswrapper[4791]: E0218 02:05:41.064922 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:05:44 crc kubenswrapper[4791]: I0218 02:05:44.061247 4791 scope.go:117] "RemoveContainer" containerID="c910b6cc87a9dcc9eddfba49c0ce2c47da01bea9873b2e6847f3f6b997395cc8" Feb 18 02:05:44 crc kubenswrapper[4791]: E0218 02:05:44.062081 4791 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 02:05:44 crc kubenswrapper[4791]: I0218 02:05:44.085816 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-fb6df79fb-5x4qj_ea096b74-bf68-4755-8827-cbcd680241c9/kube-rbac-proxy/0.log" Feb 18 02:05:44 crc kubenswrapper[4791]: I0218 02:05:44.096511 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-fb6df79fb-5x4qj_ea096b74-bf68-4755-8827-cbcd680241c9/manager/0.log" Feb 18 02:05:45 crc kubenswrapper[4791]: E0218 02:05:45.066167 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:05:55 crc kubenswrapper[4791]: I0218 02:05:55.062054 4791 scope.go:117] "RemoveContainer" containerID="c910b6cc87a9dcc9eddfba49c0ce2c47da01bea9873b2e6847f3f6b997395cc8" Feb 18 02:05:55 crc kubenswrapper[4791]: E0218 02:05:55.063246 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 02:05:55 crc kubenswrapper[4791]: E0218 02:05:55.065269 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:05:57 crc kubenswrapper[4791]: E0218 02:05:57.063731 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:05:59 crc kubenswrapper[4791]: I0218 02:05:59.402577 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-68bc856cb9-tqvq6_9179defa-6414-46af-857c-9459169745e5/prometheus-operator/0.log" Feb 18 02:05:59 crc kubenswrapper[4791]: I0218 02:05:59.573265 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j_01b8edc9-ae6f-42d9-8f91-d7918bb3959f/prometheus-operator-admission-webhook/0.log" Feb 18 02:05:59 crc kubenswrapper[4791]: I0218 02:05:59.657374 4791 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx_2d01089e-d6f0-4bfb-8ad1-5269e8a912fa/prometheus-operator-admission-webhook/0.log" Feb 18 02:05:59 crc kubenswrapper[4791]: I0218 02:05:59.816628 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-59bdc8b94-tdc8p_503c2fbe-9550-4943-bdf7-ce6372a20de2/operator/0.log" Feb 18 02:05:59 crc kubenswrapper[4791]: I0218 02:05:59.863784 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-ui-dashboards-66cbf594b5-z78rw_617ebaca-caf6-45cf-92b1-f1bb067bf2f1/observability-ui-dashboards/0.log" Feb 18 02:06:00 crc kubenswrapper[4791]: I0218 02:06:00.009974 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5bf474d74f-dp9pl_d77ea4ed-d272-4d64-83a8-b849f88861d1/perses-operator/0.log" Feb 18 02:06:06 crc kubenswrapper[4791]: I0218 02:06:06.061965 4791 scope.go:117] "RemoveContainer" containerID="c910b6cc87a9dcc9eddfba49c0ce2c47da01bea9873b2e6847f3f6b997395cc8" Feb 18 02:06:06 crc kubenswrapper[4791]: E0218 02:06:06.062966 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 02:06:08 crc kubenswrapper[4791]: E0218 02:06:08.063629 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:06:10 crc kubenswrapper[4791]: E0218 02:06:10.063790 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:06:15 crc kubenswrapper[4791]: I0218 02:06:15.798998 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_cluster-logging-operator-c769fd969-plvzd_ae298045-e94b-47f8-8404-e29637b418ce/cluster-logging-operator/0.log" Feb 18 02:06:15 crc kubenswrapper[4791]: I0218 02:06:15.969805 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_collector-qz8dc_48f38038-db15-42ad-a1b0-9a9814248bde/collector/0.log" Feb 18 02:06:16 crc kubenswrapper[4791]: I0218 02:06:16.049181 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-compactor-0_4c3e6c67-da97-4c01-bca7-ed995bc20255/loki-compactor/0.log" Feb 18 02:06:16 crc kubenswrapper[4791]: I0218 02:06:16.141471 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-distributor-5d5548c9f5-t9t9m_181d182c-32fc-497d-b84b-a127338caae4/loki-distributor/0.log" Feb 18 02:06:16 crc kubenswrapper[4791]: I0218 02:06:16.247296 4791 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-logging_logging-loki-gateway-f46df4b65-hbtdt_49f1d891-d5f2-4c56-af09-542c600ddb15/gateway/0.log" Feb 18 02:06:16 crc kubenswrapper[4791]: I0218 02:06:16.282753 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-f46df4b65-hbtdt_49f1d891-d5f2-4c56-af09-542c600ddb15/opa/0.log" Feb 18 02:06:16 crc kubenswrapper[4791]: I0218 02:06:16.433076 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-f46df4b65-plgfh_53934528-3cf8-46d2-856a-1d1deef2bd01/gateway/0.log" Feb 18 02:06:16 crc kubenswrapper[4791]: I0218 02:06:16.452857 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-f46df4b65-plgfh_53934528-3cf8-46d2-856a-1d1deef2bd01/opa/0.log" Feb 18 02:06:16 crc kubenswrapper[4791]: I0218 02:06:16.531210 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-index-gateway-0_5e9ff18e-de75-40c3-9a73-6cad6b406d1c/loki-index-gateway/0.log" Feb 18 02:06:16 crc kubenswrapper[4791]: I0218 02:06:16.678970 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-ingester-0_853d966d-3d6c-4fbf-9c83-62148305ee9e/loki-ingester/0.log" Feb 18 02:06:16 crc kubenswrapper[4791]: I0218 02:06:16.746947 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-querier-76bf7b6d45-mhn8q_82108d8c-ce91-4d2f-ab31-59fa33cb1813/loki-querier/0.log" Feb 18 02:06:16 crc kubenswrapper[4791]: I0218 02:06:16.870644 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-query-frontend-6d6859c548-qlq5c_33a1ba78-c845-421b-9404-1f56402fc29a/loki-query-frontend/0.log" Feb 18 02:06:20 crc kubenswrapper[4791]: E0218 02:06:20.063341 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:06:21 crc kubenswrapper[4791]: I0218 02:06:21.062215 4791 scope.go:117] "RemoveContainer" containerID="c910b6cc87a9dcc9eddfba49c0ce2c47da01bea9873b2e6847f3f6b997395cc8" Feb 18 02:06:21 crc kubenswrapper[4791]: E0218 02:06:21.062778 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 02:06:21 crc kubenswrapper[4791]: E0218 02:06:21.063734 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:06:32 crc kubenswrapper[4791]: I0218 02:06:32.836864 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-69bbfbf88f-7c2t9_2168cd4c-c18b-4d6e-be03-24a4749b9c66/kube-rbac-proxy/0.log" Feb 18 02:06:33 crc kubenswrapper[4791]: I0218 02:06:33.057042 4791 
log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-69bbfbf88f-7c2t9_2168cd4c-c18b-4d6e-be03-24a4749b9c66/controller/0.log" Feb 18 02:06:33 crc kubenswrapper[4791]: I0218 02:06:33.157254 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-gkll6_a5d9d585-69e8-4028-9939-a3c5e1f875fe/cp-frr-files/0.log" Feb 18 02:06:33 crc kubenswrapper[4791]: I0218 02:06:33.257723 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-gkll6_a5d9d585-69e8-4028-9939-a3c5e1f875fe/cp-frr-files/0.log" Feb 18 02:06:33 crc kubenswrapper[4791]: I0218 02:06:33.288013 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-gkll6_a5d9d585-69e8-4028-9939-a3c5e1f875fe/cp-reloader/0.log" Feb 18 02:06:33 crc kubenswrapper[4791]: I0218 02:06:33.312675 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-gkll6_a5d9d585-69e8-4028-9939-a3c5e1f875fe/cp-metrics/0.log" Feb 18 02:06:33 crc kubenswrapper[4791]: I0218 02:06:33.313764 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-gkll6_a5d9d585-69e8-4028-9939-a3c5e1f875fe/cp-reloader/0.log" Feb 18 02:06:33 crc kubenswrapper[4791]: I0218 02:06:33.465374 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-gkll6_a5d9d585-69e8-4028-9939-a3c5e1f875fe/cp-frr-files/0.log" Feb 18 02:06:33 crc kubenswrapper[4791]: I0218 02:06:33.508682 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-gkll6_a5d9d585-69e8-4028-9939-a3c5e1f875fe/cp-metrics/0.log" Feb 18 02:06:33 crc kubenswrapper[4791]: I0218 02:06:33.529682 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-gkll6_a5d9d585-69e8-4028-9939-a3c5e1f875fe/cp-reloader/0.log" Feb 18 02:06:33 crc kubenswrapper[4791]: I0218 02:06:33.548324 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-gkll6_a5d9d585-69e8-4028-9939-a3c5e1f875fe/cp-metrics/0.log" Feb 18 02:06:33 crc kubenswrapper[4791]: I0218 02:06:33.677386 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-gkll6_a5d9d585-69e8-4028-9939-a3c5e1f875fe/cp-frr-files/0.log" Feb 18 02:06:33 crc kubenswrapper[4791]: I0218 02:06:33.698707 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-gkll6_a5d9d585-69e8-4028-9939-a3c5e1f875fe/cp-metrics/0.log" Feb 18 02:06:33 crc kubenswrapper[4791]: I0218 02:06:33.700437 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-gkll6_a5d9d585-69e8-4028-9939-a3c5e1f875fe/cp-reloader/0.log" Feb 18 02:06:33 crc kubenswrapper[4791]: I0218 02:06:33.736248 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-gkll6_a5d9d585-69e8-4028-9939-a3c5e1f875fe/controller/0.log" Feb 18 02:06:33 crc kubenswrapper[4791]: I0218 02:06:33.869635 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-gkll6_a5d9d585-69e8-4028-9939-a3c5e1f875fe/kube-rbac-proxy/0.log" Feb 18 02:06:33 crc kubenswrapper[4791]: I0218 02:06:33.872763 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-gkll6_a5d9d585-69e8-4028-9939-a3c5e1f875fe/frr-metrics/0.log" Feb 18 02:06:33 crc kubenswrapper[4791]: I0218 02:06:33.937301 4791 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-gkll6_a5d9d585-69e8-4028-9939-a3c5e1f875fe/kube-rbac-proxy-frr/0.log" Feb 18 02:06:34 crc kubenswrapper[4791]: E0218 02:06:34.064114 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:06:34 crc kubenswrapper[4791]: I0218 02:06:34.088728 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-gkll6_a5d9d585-69e8-4028-9939-a3c5e1f875fe/reloader/0.log" Feb 18 02:06:34 crc kubenswrapper[4791]: I0218 02:06:34.165982 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-78b44bf5bb-h4g9l_799e6275-aff5-49be-9af2-3f4187055abc/frr-k8s-webhook-server/0.log" Feb 18 02:06:34 crc kubenswrapper[4791]: I0218 02:06:34.355822 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-f9fdbc69b-szb26_1d334193-b713-438c-a889-9a58c82e980d/manager/0.log" Feb 18 02:06:34 crc kubenswrapper[4791]: I0218 02:06:34.554183 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-tc6ln_2da8a059-b58c-4390-b09c-87158939e07f/kube-rbac-proxy/0.log" Feb 18 02:06:34 crc kubenswrapper[4791]: I0218 02:06:34.556634 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-67d9bcb746-9bq8h_e884fd03-f3e4-4a90-833a-af7afbf80be3/webhook-server/0.log" Feb 18 02:06:35 crc kubenswrapper[4791]: E0218 02:06:35.062399 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:06:35 crc kubenswrapper[4791]: I0218 02:06:35.353514 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-tc6ln_2da8a059-b58c-4390-b09c-87158939e07f/speaker/0.log" Feb 18 02:06:35 crc kubenswrapper[4791]: I0218 02:06:35.688605 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-gkll6_a5d9d585-69e8-4028-9939-a3c5e1f875fe/frr/0.log" Feb 18 02:06:36 crc kubenswrapper[4791]: I0218 02:06:36.062301 4791 scope.go:117] "RemoveContainer" containerID="c910b6cc87a9dcc9eddfba49c0ce2c47da01bea9873b2e6847f3f6b997395cc8" Feb 18 02:06:36 crc kubenswrapper[4791]: E0218 02:06:36.062588 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 02:06:49 crc kubenswrapper[4791]: E0218 02:06:49.072975 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" 
podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:06:49 crc kubenswrapper[4791]: E0218 02:06:49.073302 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:06:50 crc kubenswrapper[4791]: I0218 02:06:50.256753 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19cd857_a6691e14-8a04-46ac-94cb-cbcfa2894bb8/util/0.log" Feb 18 02:06:50 crc kubenswrapper[4791]: I0218 02:06:50.463880 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19cd857_a6691e14-8a04-46ac-94cb-cbcfa2894bb8/pull/0.log" Feb 18 02:06:50 crc kubenswrapper[4791]: I0218 02:06:50.477108 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19cd857_a6691e14-8a04-46ac-94cb-cbcfa2894bb8/pull/0.log" Feb 18 02:06:50 crc kubenswrapper[4791]: I0218 02:06:50.478898 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19cd857_a6691e14-8a04-46ac-94cb-cbcfa2894bb8/util/0.log" Feb 18 02:06:50 crc kubenswrapper[4791]: I0218 02:06:50.672059 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19cd857_a6691e14-8a04-46ac-94cb-cbcfa2894bb8/util/0.log" Feb 18 02:06:50 crc kubenswrapper[4791]: I0218 02:06:50.684553 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19cd857_a6691e14-8a04-46ac-94cb-cbcfa2894bb8/pull/0.log" Feb 18 02:06:50 crc kubenswrapper[4791]: I0218 02:06:50.731217 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_371ee4810f5f68c5176d7257cefd8758df33c232524c25acbf90f69e19cd857_a6691e14-8a04-46ac-94cb-cbcfa2894bb8/extract/0.log" Feb 18 02:06:50 crc kubenswrapper[4791]: I0218 02:06:50.849871 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvkrs_83933a55-a803-449b-8b3c-57ea3692f403/util/0.log" Feb 18 02:06:51 crc kubenswrapper[4791]: I0218 02:06:51.062191 4791 scope.go:117] "RemoveContainer" containerID="c910b6cc87a9dcc9eddfba49c0ce2c47da01bea9873b2e6847f3f6b997395cc8" Feb 18 02:06:51 crc kubenswrapper[4791]: E0218 02:06:51.062480 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 02:06:51 crc kubenswrapper[4791]: I0218 02:06:51.082971 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvkrs_83933a55-a803-449b-8b3c-57ea3692f403/pull/0.log" Feb 18 02:06:51 crc kubenswrapper[4791]: I0218 02:06:51.127106 
4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvkrs_83933a55-a803-449b-8b3c-57ea3692f403/util/0.log" Feb 18 02:06:51 crc kubenswrapper[4791]: I0218 02:06:51.152217 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvkrs_83933a55-a803-449b-8b3c-57ea3692f403/pull/0.log" Feb 18 02:06:51 crc kubenswrapper[4791]: I0218 02:06:51.316512 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvkrs_83933a55-a803-449b-8b3c-57ea3692f403/util/0.log" Feb 18 02:06:51 crc kubenswrapper[4791]: I0218 02:06:51.393045 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvkrs_83933a55-a803-449b-8b3c-57ea3692f403/extract/0.log" Feb 18 02:06:51 crc kubenswrapper[4791]: I0218 02:06:51.400145 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvkrs_83933a55-a803-449b-8b3c-57ea3692f403/pull/0.log" Feb 18 02:06:51 crc kubenswrapper[4791]: I0218 02:06:51.525893 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213w5vk2_40f169fa-a16d-48fd-aca9-881183df5077/util/0.log" Feb 18 02:06:51 crc kubenswrapper[4791]: I0218 02:06:51.693650 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213w5vk2_40f169fa-a16d-48fd-aca9-881183df5077/pull/0.log" Feb 18 02:06:51 crc kubenswrapper[4791]: I0218 02:06:51.705350 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213w5vk2_40f169fa-a16d-48fd-aca9-881183df5077/util/0.log" Feb 18 02:06:51 crc kubenswrapper[4791]: I0218 02:06:51.739585 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213w5vk2_40f169fa-a16d-48fd-aca9-881183df5077/pull/0.log" Feb 18 02:06:51 crc kubenswrapper[4791]: I0218 02:06:51.939145 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213w5vk2_40f169fa-a16d-48fd-aca9-881183df5077/util/0.log" Feb 18 02:06:51 crc kubenswrapper[4791]: I0218 02:06:51.939856 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213w5vk2_40f169fa-a16d-48fd-aca9-881183df5077/pull/0.log" Feb 18 02:06:51 crc kubenswrapper[4791]: I0218 02:06:51.945124 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a9b3ed1fe9273b725119dcfb777257f08e39bbefccdf592dce2d0dc213w5vk2_40f169fa-a16d-48fd-aca9-881183df5077/extract/0.log" Feb 18 02:06:52 crc kubenswrapper[4791]: I0218 02:06:52.143267 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2g4lz_f90a5f9e-114f-4b1c-83fb-698657f3845a/extract-utilities/0.log" Feb 18 02:06:52 crc kubenswrapper[4791]: I0218 02:06:52.317876 4791 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_certified-operators-2g4lz_f90a5f9e-114f-4b1c-83fb-698657f3845a/extract-content/0.log" Feb 18 02:06:52 crc kubenswrapper[4791]: I0218 02:06:52.325359 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2g4lz_f90a5f9e-114f-4b1c-83fb-698657f3845a/extract-utilities/0.log" Feb 18 02:06:52 crc kubenswrapper[4791]: I0218 02:06:52.340211 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2g4lz_f90a5f9e-114f-4b1c-83fb-698657f3845a/extract-content/0.log" Feb 18 02:06:52 crc kubenswrapper[4791]: I0218 02:06:52.571368 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2g4lz_f90a5f9e-114f-4b1c-83fb-698657f3845a/extract-content/0.log" Feb 18 02:06:52 crc kubenswrapper[4791]: I0218 02:06:52.716168 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2g4lz_f90a5f9e-114f-4b1c-83fb-698657f3845a/extract-utilities/0.log" Feb 18 02:06:52 crc kubenswrapper[4791]: I0218 02:06:52.833187 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-kwhh6_777a351a-ca39-46f0-912f-fbbda9651efe/extract-utilities/0.log" Feb 18 02:06:53 crc kubenswrapper[4791]: I0218 02:06:53.062629 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-kwhh6_777a351a-ca39-46f0-912f-fbbda9651efe/extract-utilities/0.log" Feb 18 02:06:53 crc kubenswrapper[4791]: I0218 02:06:53.115121 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-kwhh6_777a351a-ca39-46f0-912f-fbbda9651efe/extract-content/0.log" Feb 18 02:06:53 crc kubenswrapper[4791]: I0218 02:06:53.156099 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-kwhh6_777a351a-ca39-46f0-912f-fbbda9651efe/extract-content/0.log" Feb 18 02:06:53 crc kubenswrapper[4791]: I0218 02:06:53.186667 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-2g4lz_f90a5f9e-114f-4b1c-83fb-698657f3845a/registry-server/0.log" Feb 18 02:06:53 crc kubenswrapper[4791]: I0218 02:06:53.372708 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-kwhh6_777a351a-ca39-46f0-912f-fbbda9651efe/extract-utilities/0.log" Feb 18 02:06:53 crc kubenswrapper[4791]: I0218 02:06:53.390639 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-kwhh6_777a351a-ca39-46f0-912f-fbbda9651efe/extract-content/0.log" Feb 18 02:06:53 crc kubenswrapper[4791]: I0218 02:06:53.629146 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-kwhh6_777a351a-ca39-46f0-912f-fbbda9651efe/registry-server/0.log" Feb 18 02:06:53 crc kubenswrapper[4791]: I0218 02:06:53.629207 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e089899v296_354c26d0-70dc-44dd-a18c-362d7fa5b5e9/util/0.log" Feb 18 02:06:53 crc kubenswrapper[4791]: I0218 02:06:53.780712 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e089899v296_354c26d0-70dc-44dd-a18c-362d7fa5b5e9/pull/0.log" Feb 18 02:06:53 crc kubenswrapper[4791]: I0218 02:06:53.809872 4791 
log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e089899v296_354c26d0-70dc-44dd-a18c-362d7fa5b5e9/pull/0.log" Feb 18 02:06:53 crc kubenswrapper[4791]: I0218 02:06:53.810402 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e089899v296_354c26d0-70dc-44dd-a18c-362d7fa5b5e9/util/0.log" Feb 18 02:06:53 crc kubenswrapper[4791]: I0218 02:06:53.973433 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e089899v296_354c26d0-70dc-44dd-a18c-362d7fa5b5e9/util/0.log" Feb 18 02:06:53 crc kubenswrapper[4791]: I0218 02:06:53.994172 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e089899v296_354c26d0-70dc-44dd-a18c-362d7fa5b5e9/extract/0.log" Feb 18 02:06:54 crc kubenswrapper[4791]: I0218 02:06:54.029734 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd_4f6571ac-7426-452e-93f0-6f6d82d7bece/util/0.log" Feb 18 02:06:54 crc kubenswrapper[4791]: I0218 02:06:54.046598 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e2b87168fae98cca1c2d05d26ceb83b1b30b4b54c6968a79bb91e089899v296_354c26d0-70dc-44dd-a18c-362d7fa5b5e9/pull/0.log" Feb 18 02:06:54 crc kubenswrapper[4791]: I0218 02:06:54.225518 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd_4f6571ac-7426-452e-93f0-6f6d82d7bece/util/0.log" Feb 18 02:06:54 crc kubenswrapper[4791]: I0218 02:06:54.232223 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd_4f6571ac-7426-452e-93f0-6f6d82d7bece/pull/0.log" Feb 18 02:06:54 crc kubenswrapper[4791]: I0218 02:06:54.257987 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd_4f6571ac-7426-452e-93f0-6f6d82d7bece/pull/0.log" Feb 18 02:06:54 crc kubenswrapper[4791]: I0218 02:06:54.419679 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd_4f6571ac-7426-452e-93f0-6f6d82d7bece/util/0.log" Feb 18 02:06:54 crc kubenswrapper[4791]: I0218 02:06:54.441614 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd_4f6571ac-7426-452e-93f0-6f6d82d7bece/pull/0.log" Feb 18 02:06:54 crc kubenswrapper[4791]: I0218 02:06:54.480379 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_f938df2ce267491f058ea7e3036e97ee3f65bf3665185b1a4f52323ecabncwd_4f6571ac-7426-452e-93f0-6f6d82d7bece/extract/0.log" Feb 18 02:06:54 crc kubenswrapper[4791]: I0218 02:06:54.500554 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-2zpl9_2b61f101-7b72-4308-9c07-9d2e441f333c/marketplace-operator/0.log" Feb 18 02:06:54 crc kubenswrapper[4791]: I0218 02:06:54.617535 4791 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-marketplace-hhjg7_cd6b21cf-4c8d-4f85-8587-3184b3063b8a/extract-utilities/0.log" Feb 18 02:06:54 crc kubenswrapper[4791]: I0218 02:06:54.767842 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-hhjg7_cd6b21cf-4c8d-4f85-8587-3184b3063b8a/extract-content/0.log" Feb 18 02:06:54 crc kubenswrapper[4791]: I0218 02:06:54.804635 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-hhjg7_cd6b21cf-4c8d-4f85-8587-3184b3063b8a/extract-utilities/0.log" Feb 18 02:06:54 crc kubenswrapper[4791]: I0218 02:06:54.812431 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-hhjg7_cd6b21cf-4c8d-4f85-8587-3184b3063b8a/extract-content/0.log" Feb 18 02:06:54 crc kubenswrapper[4791]: I0218 02:06:54.954110 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-hhjg7_cd6b21cf-4c8d-4f85-8587-3184b3063b8a/extract-content/0.log" Feb 18 02:06:55 crc kubenswrapper[4791]: I0218 02:06:55.019523 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-gf4b2_498618d7-e10c-442d-9150-7cd04846d4d5/extract-utilities/0.log" Feb 18 02:06:55 crc kubenswrapper[4791]: I0218 02:06:55.050486 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-hhjg7_cd6b21cf-4c8d-4f85-8587-3184b3063b8a/extract-utilities/0.log" Feb 18 02:06:55 crc kubenswrapper[4791]: I0218 02:06:55.163076 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-hhjg7_cd6b21cf-4c8d-4f85-8587-3184b3063b8a/registry-server/0.log" Feb 18 02:06:55 crc kubenswrapper[4791]: I0218 02:06:55.276268 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-gf4b2_498618d7-e10c-442d-9150-7cd04846d4d5/extract-content/0.log" Feb 18 02:06:55 crc kubenswrapper[4791]: I0218 02:06:55.299380 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-gf4b2_498618d7-e10c-442d-9150-7cd04846d4d5/extract-content/0.log" Feb 18 02:06:55 crc kubenswrapper[4791]: I0218 02:06:55.300876 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-gf4b2_498618d7-e10c-442d-9150-7cd04846d4d5/extract-utilities/0.log" Feb 18 02:06:55 crc kubenswrapper[4791]: I0218 02:06:55.488933 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-gf4b2_498618d7-e10c-442d-9150-7cd04846d4d5/extract-utilities/0.log" Feb 18 02:06:55 crc kubenswrapper[4791]: I0218 02:06:55.519369 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-gf4b2_498618d7-e10c-442d-9150-7cd04846d4d5/extract-content/0.log" Feb 18 02:06:56 crc kubenswrapper[4791]: I0218 02:06:56.202690 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-gf4b2_498618d7-e10c-442d-9150-7cd04846d4d5/registry-server/0.log" Feb 18 02:06:56 crc kubenswrapper[4791]: I0218 02:06:56.644387 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-qgvss"] Feb 18 02:06:56 crc kubenswrapper[4791]: E0218 02:06:56.644837 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97f48b91-2087-41cd-902d-75da9fa5ea8c" containerName="registry-server" Feb 18 
02:06:56 crc kubenswrapper[4791]: I0218 02:06:56.644850 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="97f48b91-2087-41cd-902d-75da9fa5ea8c" containerName="registry-server" Feb 18 02:06:56 crc kubenswrapper[4791]: E0218 02:06:56.644867 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97f48b91-2087-41cd-902d-75da9fa5ea8c" containerName="extract-content" Feb 18 02:06:56 crc kubenswrapper[4791]: I0218 02:06:56.644873 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="97f48b91-2087-41cd-902d-75da9fa5ea8c" containerName="extract-content" Feb 18 02:06:56 crc kubenswrapper[4791]: E0218 02:06:56.644901 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97f48b91-2087-41cd-902d-75da9fa5ea8c" containerName="extract-utilities" Feb 18 02:06:56 crc kubenswrapper[4791]: I0218 02:06:56.644907 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="97f48b91-2087-41cd-902d-75da9fa5ea8c" containerName="extract-utilities" Feb 18 02:06:56 crc kubenswrapper[4791]: I0218 02:06:56.645204 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="97f48b91-2087-41cd-902d-75da9fa5ea8c" containerName="registry-server" Feb 18 02:06:56 crc kubenswrapper[4791]: I0218 02:06:56.647077 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qgvss" Feb 18 02:06:56 crc kubenswrapper[4791]: I0218 02:06:56.678362 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qgvss"] Feb 18 02:06:56 crc kubenswrapper[4791]: I0218 02:06:56.727831 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2bdff1e1-fe38-4659-bd28-23ed1297df20-catalog-content\") pod \"redhat-marketplace-qgvss\" (UID: \"2bdff1e1-fe38-4659-bd28-23ed1297df20\") " pod="openshift-marketplace/redhat-marketplace-qgvss" Feb 18 02:06:56 crc kubenswrapper[4791]: I0218 02:06:56.728370 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bm4kn\" (UniqueName: \"kubernetes.io/projected/2bdff1e1-fe38-4659-bd28-23ed1297df20-kube-api-access-bm4kn\") pod \"redhat-marketplace-qgvss\" (UID: \"2bdff1e1-fe38-4659-bd28-23ed1297df20\") " pod="openshift-marketplace/redhat-marketplace-qgvss" Feb 18 02:06:56 crc kubenswrapper[4791]: I0218 02:06:56.728551 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2bdff1e1-fe38-4659-bd28-23ed1297df20-utilities\") pod \"redhat-marketplace-qgvss\" (UID: \"2bdff1e1-fe38-4659-bd28-23ed1297df20\") " pod="openshift-marketplace/redhat-marketplace-qgvss" Feb 18 02:06:56 crc kubenswrapper[4791]: I0218 02:06:56.831638 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bm4kn\" (UniqueName: \"kubernetes.io/projected/2bdff1e1-fe38-4659-bd28-23ed1297df20-kube-api-access-bm4kn\") pod \"redhat-marketplace-qgvss\" (UID: \"2bdff1e1-fe38-4659-bd28-23ed1297df20\") " pod="openshift-marketplace/redhat-marketplace-qgvss" Feb 18 02:06:56 crc kubenswrapper[4791]: I0218 02:06:56.832109 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2bdff1e1-fe38-4659-bd28-23ed1297df20-utilities\") pod \"redhat-marketplace-qgvss\" (UID: \"2bdff1e1-fe38-4659-bd28-23ed1297df20\") " 
pod="openshift-marketplace/redhat-marketplace-qgvss" Feb 18 02:06:56 crc kubenswrapper[4791]: I0218 02:06:56.832276 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2bdff1e1-fe38-4659-bd28-23ed1297df20-catalog-content\") pod \"redhat-marketplace-qgvss\" (UID: \"2bdff1e1-fe38-4659-bd28-23ed1297df20\") " pod="openshift-marketplace/redhat-marketplace-qgvss" Feb 18 02:06:56 crc kubenswrapper[4791]: I0218 02:06:56.832629 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2bdff1e1-fe38-4659-bd28-23ed1297df20-utilities\") pod \"redhat-marketplace-qgvss\" (UID: \"2bdff1e1-fe38-4659-bd28-23ed1297df20\") " pod="openshift-marketplace/redhat-marketplace-qgvss" Feb 18 02:06:56 crc kubenswrapper[4791]: I0218 02:06:56.832797 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2bdff1e1-fe38-4659-bd28-23ed1297df20-catalog-content\") pod \"redhat-marketplace-qgvss\" (UID: \"2bdff1e1-fe38-4659-bd28-23ed1297df20\") " pod="openshift-marketplace/redhat-marketplace-qgvss" Feb 18 02:06:56 crc kubenswrapper[4791]: I0218 02:06:56.857310 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bm4kn\" (UniqueName: \"kubernetes.io/projected/2bdff1e1-fe38-4659-bd28-23ed1297df20-kube-api-access-bm4kn\") pod \"redhat-marketplace-qgvss\" (UID: \"2bdff1e1-fe38-4659-bd28-23ed1297df20\") " pod="openshift-marketplace/redhat-marketplace-qgvss" Feb 18 02:06:57 crc kubenswrapper[4791]: I0218 02:06:57.006919 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qgvss" Feb 18 02:06:57 crc kubenswrapper[4791]: I0218 02:06:57.556932 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qgvss"] Feb 18 02:06:58 crc kubenswrapper[4791]: I0218 02:06:58.329346 4791 generic.go:334] "Generic (PLEG): container finished" podID="2bdff1e1-fe38-4659-bd28-23ed1297df20" containerID="10c9e48443c713d146d379dfcc77c03fec5498c7ea86a55468a0beeb1d69aba3" exitCode=0 Feb 18 02:06:58 crc kubenswrapper[4791]: I0218 02:06:58.329449 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qgvss" event={"ID":"2bdff1e1-fe38-4659-bd28-23ed1297df20","Type":"ContainerDied","Data":"10c9e48443c713d146d379dfcc77c03fec5498c7ea86a55468a0beeb1d69aba3"} Feb 18 02:06:58 crc kubenswrapper[4791]: I0218 02:06:58.329768 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qgvss" event={"ID":"2bdff1e1-fe38-4659-bd28-23ed1297df20","Type":"ContainerStarted","Data":"4f35b4878bb478504acf9dad2626d440f8ba123b85c16803c3f7418b01897cd7"} Feb 18 02:07:00 crc kubenswrapper[4791]: I0218 02:07:00.352666 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qgvss" event={"ID":"2bdff1e1-fe38-4659-bd28-23ed1297df20","Type":"ContainerStarted","Data":"7e919a64b4bc7a40d3229d98e75d5cc19344c228ff9abed470dea69cd951fae1"} Feb 18 02:07:01 crc kubenswrapper[4791]: I0218 02:07:01.367995 4791 generic.go:334] "Generic (PLEG): container finished" podID="2bdff1e1-fe38-4659-bd28-23ed1297df20" containerID="7e919a64b4bc7a40d3229d98e75d5cc19344c228ff9abed470dea69cd951fae1" exitCode=0 Feb 18 02:07:01 crc kubenswrapper[4791]: I0218 02:07:01.368097 4791 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-marketplace/redhat-marketplace-qgvss" event={"ID":"2bdff1e1-fe38-4659-bd28-23ed1297df20","Type":"ContainerDied","Data":"7e919a64b4bc7a40d3229d98e75d5cc19344c228ff9abed470dea69cd951fae1"} Feb 18 02:07:02 crc kubenswrapper[4791]: E0218 02:07:02.062684 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:07:02 crc kubenswrapper[4791]: I0218 02:07:02.379957 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qgvss" event={"ID":"2bdff1e1-fe38-4659-bd28-23ed1297df20","Type":"ContainerStarted","Data":"a1a9eecc9fc83f641980ece45807651b4398622aadc5fdbf33eb5da08883bf1e"} Feb 18 02:07:02 crc kubenswrapper[4791]: I0218 02:07:02.415531 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-qgvss" podStartSLOduration=2.95397032 podStartE2EDuration="6.415509128s" podCreationTimestamp="2026-02-18 02:06:56 +0000 UTC" firstStartedPulling="2026-02-18 02:06:58.331373895 +0000 UTC m=+5559.899387065" lastFinishedPulling="2026-02-18 02:07:01.792912703 +0000 UTC m=+5563.360925873" observedRunningTime="2026-02-18 02:07:02.406363786 +0000 UTC m=+5563.974376956" watchObservedRunningTime="2026-02-18 02:07:02.415509128 +0000 UTC m=+5563.983522308" Feb 18 02:07:04 crc kubenswrapper[4791]: E0218 02:07:04.064586 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:07:06 crc kubenswrapper[4791]: I0218 02:07:06.061342 4791 scope.go:117] "RemoveContainer" containerID="c910b6cc87a9dcc9eddfba49c0ce2c47da01bea9873b2e6847f3f6b997395cc8" Feb 18 02:07:06 crc kubenswrapper[4791]: E0218 02:07:06.061876 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 02:07:07 crc kubenswrapper[4791]: I0218 02:07:07.007178 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-qgvss" Feb 18 02:07:07 crc kubenswrapper[4791]: I0218 02:07:07.007240 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-qgvss" Feb 18 02:07:07 crc kubenswrapper[4791]: I0218 02:07:07.072819 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-qgvss" Feb 18 02:07:07 crc kubenswrapper[4791]: I0218 02:07:07.510676 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-qgvss" Feb 18 02:07:11 crc kubenswrapper[4791]: I0218 02:07:11.603000 4791 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-bc4568dfd-vs6tx_2d01089e-d6f0-4bfb-8ad1-5269e8a912fa/prometheus-operator-admission-webhook/0.log" Feb 18 02:07:11 crc kubenswrapper[4791]: I0218 02:07:11.616963 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-bc4568dfd-4qf5j_01b8edc9-ae6f-42d9-8f91-d7918bb3959f/prometheus-operator-admission-webhook/0.log" Feb 18 02:07:11 crc kubenswrapper[4791]: I0218 02:07:11.686054 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-68bc856cb9-tqvq6_9179defa-6414-46af-857c-9459169745e5/prometheus-operator/0.log" Feb 18 02:07:11 crc kubenswrapper[4791]: I0218 02:07:11.803803 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-ui-dashboards-66cbf594b5-z78rw_617ebaca-caf6-45cf-92b1-f1bb067bf2f1/observability-ui-dashboards/0.log" Feb 18 02:07:11 crc kubenswrapper[4791]: I0218 02:07:11.831132 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-59bdc8b94-tdc8p_503c2fbe-9550-4943-bdf7-ce6372a20de2/operator/0.log" Feb 18 02:07:11 crc kubenswrapper[4791]: I0218 02:07:11.877107 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5bf474d74f-dp9pl_d77ea4ed-d272-4d64-83a8-b849f88861d1/perses-operator/0.log" Feb 18 02:07:13 crc kubenswrapper[4791]: I0218 02:07:13.230330 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qgvss"] Feb 18 02:07:13 crc kubenswrapper[4791]: I0218 02:07:13.230933 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-qgvss" podUID="2bdff1e1-fe38-4659-bd28-23ed1297df20" containerName="registry-server" containerID="cri-o://a1a9eecc9fc83f641980ece45807651b4398622aadc5fdbf33eb5da08883bf1e" gracePeriod=2 Feb 18 02:07:13 crc kubenswrapper[4791]: I0218 02:07:13.538591 4791 generic.go:334] "Generic (PLEG): container finished" podID="2bdff1e1-fe38-4659-bd28-23ed1297df20" containerID="a1a9eecc9fc83f641980ece45807651b4398622aadc5fdbf33eb5da08883bf1e" exitCode=0 Feb 18 02:07:13 crc kubenswrapper[4791]: I0218 02:07:13.538646 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qgvss" event={"ID":"2bdff1e1-fe38-4659-bd28-23ed1297df20","Type":"ContainerDied","Data":"a1a9eecc9fc83f641980ece45807651b4398622aadc5fdbf33eb5da08883bf1e"} Feb 18 02:07:13 crc kubenswrapper[4791]: I0218 02:07:13.709188 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qgvss" Feb 18 02:07:13 crc kubenswrapper[4791]: I0218 02:07:13.788271 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2bdff1e1-fe38-4659-bd28-23ed1297df20-catalog-content\") pod \"2bdff1e1-fe38-4659-bd28-23ed1297df20\" (UID: \"2bdff1e1-fe38-4659-bd28-23ed1297df20\") " Feb 18 02:07:13 crc kubenswrapper[4791]: I0218 02:07:13.788445 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2bdff1e1-fe38-4659-bd28-23ed1297df20-utilities\") pod \"2bdff1e1-fe38-4659-bd28-23ed1297df20\" (UID: \"2bdff1e1-fe38-4659-bd28-23ed1297df20\") " Feb 18 02:07:13 crc kubenswrapper[4791]: I0218 02:07:13.788478 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bm4kn\" (UniqueName: \"kubernetes.io/projected/2bdff1e1-fe38-4659-bd28-23ed1297df20-kube-api-access-bm4kn\") pod \"2bdff1e1-fe38-4659-bd28-23ed1297df20\" (UID: \"2bdff1e1-fe38-4659-bd28-23ed1297df20\") " Feb 18 02:07:13 crc kubenswrapper[4791]: I0218 02:07:13.789454 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2bdff1e1-fe38-4659-bd28-23ed1297df20-utilities" (OuterVolumeSpecName: "utilities") pod "2bdff1e1-fe38-4659-bd28-23ed1297df20" (UID: "2bdff1e1-fe38-4659-bd28-23ed1297df20"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 02:07:13 crc kubenswrapper[4791]: I0218 02:07:13.789679 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2bdff1e1-fe38-4659-bd28-23ed1297df20-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 02:07:13 crc kubenswrapper[4791]: I0218 02:07:13.809380 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2bdff1e1-fe38-4659-bd28-23ed1297df20-kube-api-access-bm4kn" (OuterVolumeSpecName: "kube-api-access-bm4kn") pod "2bdff1e1-fe38-4659-bd28-23ed1297df20" (UID: "2bdff1e1-fe38-4659-bd28-23ed1297df20"). InnerVolumeSpecName "kube-api-access-bm4kn". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 02:07:13 crc kubenswrapper[4791]: I0218 02:07:13.814467 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2bdff1e1-fe38-4659-bd28-23ed1297df20-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2bdff1e1-fe38-4659-bd28-23ed1297df20" (UID: "2bdff1e1-fe38-4659-bd28-23ed1297df20"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 02:07:13 crc kubenswrapper[4791]: I0218 02:07:13.891528 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2bdff1e1-fe38-4659-bd28-23ed1297df20-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 02:07:13 crc kubenswrapper[4791]: I0218 02:07:13.891567 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bm4kn\" (UniqueName: \"kubernetes.io/projected/2bdff1e1-fe38-4659-bd28-23ed1297df20-kube-api-access-bm4kn\") on node \"crc\" DevicePath \"\"" Feb 18 02:07:14 crc kubenswrapper[4791]: E0218 02:07:14.063623 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:07:14 crc kubenswrapper[4791]: I0218 02:07:14.555828 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qgvss" event={"ID":"2bdff1e1-fe38-4659-bd28-23ed1297df20","Type":"ContainerDied","Data":"4f35b4878bb478504acf9dad2626d440f8ba123b85c16803c3f7418b01897cd7"} Feb 18 02:07:14 crc kubenswrapper[4791]: I0218 02:07:14.556231 4791 scope.go:117] "RemoveContainer" containerID="a1a9eecc9fc83f641980ece45807651b4398622aadc5fdbf33eb5da08883bf1e" Feb 18 02:07:14 crc kubenswrapper[4791]: I0218 02:07:14.556428 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qgvss" Feb 18 02:07:14 crc kubenswrapper[4791]: I0218 02:07:14.591924 4791 scope.go:117] "RemoveContainer" containerID="7e919a64b4bc7a40d3229d98e75d5cc19344c228ff9abed470dea69cd951fae1" Feb 18 02:07:14 crc kubenswrapper[4791]: I0218 02:07:14.600130 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qgvss"] Feb 18 02:07:14 crc kubenswrapper[4791]: I0218 02:07:14.614027 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-qgvss"] Feb 18 02:07:14 crc kubenswrapper[4791]: I0218 02:07:14.616655 4791 scope.go:117] "RemoveContainer" containerID="10c9e48443c713d146d379dfcc77c03fec5498c7ea86a55468a0beeb1d69aba3" Feb 18 02:07:15 crc kubenswrapper[4791]: I0218 02:07:15.073068 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2bdff1e1-fe38-4659-bd28-23ed1297df20" path="/var/lib/kubelet/pods/2bdff1e1-fe38-4659-bd28-23ed1297df20/volumes" Feb 18 02:07:17 crc kubenswrapper[4791]: I0218 02:07:17.066004 4791 scope.go:117] "RemoveContainer" containerID="c910b6cc87a9dcc9eddfba49c0ce2c47da01bea9873b2e6847f3f6b997395cc8" Feb 18 02:07:17 crc kubenswrapper[4791]: E0218 02:07:17.066935 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-bhfmv_openshift-machine-config-operator(0b31a333-8f95-459c-8135-e91e557c4c85)\"" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" Feb 18 02:07:18 crc kubenswrapper[4791]: E0218 02:07:18.065273 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling 
image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:07:25 crc kubenswrapper[4791]: E0218 02:07:25.065297 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:07:26 crc kubenswrapper[4791]: I0218 02:07:26.887891 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-fb6df79fb-5x4qj_ea096b74-bf68-4755-8827-cbcd680241c9/kube-rbac-proxy/0.log" Feb 18 02:07:26 crc kubenswrapper[4791]: I0218 02:07:26.952910 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-fb6df79fb-5x4qj_ea096b74-bf68-4755-8827-cbcd680241c9/manager/0.log" Feb 18 02:07:29 crc kubenswrapper[4791]: I0218 02:07:29.081916 4791 scope.go:117] "RemoveContainer" containerID="c910b6cc87a9dcc9eddfba49c0ce2c47da01bea9873b2e6847f3f6b997395cc8" Feb 18 02:07:29 crc kubenswrapper[4791]: I0218 02:07:29.729480 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerStarted","Data":"355003f233a8836345e8bb1d34c34e7a38178d305c6ad2415c31389a63d9f561"} Feb 18 02:07:32 crc kubenswrapper[4791]: E0218 02:07:32.063185 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:07:39 crc kubenswrapper[4791]: E0218 02:07:39.077378 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:07:45 crc kubenswrapper[4791]: E0218 02:07:45.066721 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:07:50 crc kubenswrapper[4791]: E0218 02:07:50.065702 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:07:51 crc kubenswrapper[4791]: E0218 02:07:51.961375 4791 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.113:48644->38.102.83.113:45329: write tcp 38.102.83.113:48644->38.102.83.113:45329: write: broken pipe Feb 18 02:07:54 crc kubenswrapper[4791]: E0218 02:07:54.895239 4791 
upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.113:48708->38.102.83.113:45329: write tcp 38.102.83.113:48708->38.102.83.113:45329: write: broken pipe Feb 18 02:08:00 crc kubenswrapper[4791]: E0218 02:08:00.063448 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:08:05 crc kubenswrapper[4791]: I0218 02:08:05.069214 4791 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 18 02:08:05 crc kubenswrapper[4791]: E0218 02:08:05.191982 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 02:08:05 crc kubenswrapper[4791]: E0218 02:08:05.192065 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 02:08:05 crc kubenswrapper[4791]: E0218 02:08:05.192234 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lgn7d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL 
MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-scphk_openstack(68e5e8d6-5771-4045-858a-4a39b2db99f9): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 18 02:08:05 crc kubenswrapper[4791]: E0218 02:08:05.193506 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:08:13 crc kubenswrapper[4791]: E0218 02:08:13.065336 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:08:20 crc kubenswrapper[4791]: E0218 02:08:20.063312 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:08:25 crc kubenswrapper[4791]: E0218 02:08:25.066307 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:08:32 crc kubenswrapper[4791]: E0218 02:08:32.063937 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:08:35 crc kubenswrapper[4791]: I0218 02:08:35.265308 4791 scope.go:117] "RemoveContainer" containerID="ea2f1591e971c8e16289d17943f302688cbb64e04a3bdd995372da05a06ccadd" Feb 18 02:08:36 crc kubenswrapper[4791]: E0218 02:08:36.191642 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in 
quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 02:08:36 crc kubenswrapper[4791]: E0218 02:08:36.192009 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 02:08:36 crc kubenswrapper[4791]: E0218 02:08:36.192229 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndh699h566hcch79h59h595h58bhb6h7fh594h67fh546h668h56hc8h6chd5h94h599h587hddh64bh57bhc9h68fh7dh57bh65dh64fh68dh59dq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-np7gr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(4b9cec47-aeda-40f0-b83e-46f09ce65e95): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in 
quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 18 02:08:36 crc kubenswrapper[4791]: E0218 02:08:36.193691 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:08:43 crc kubenswrapper[4791]: E0218 02:08:43.062780 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:08:51 crc kubenswrapper[4791]: E0218 02:08:51.065422 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:08:56 crc kubenswrapper[4791]: E0218 02:08:56.063852 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:09:05 crc kubenswrapper[4791]: E0218 02:09:05.064904 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:09:07 crc kubenswrapper[4791]: E0218 02:09:07.063375 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:09:18 crc kubenswrapper[4791]: I0218 02:09:18.022899 4791 generic.go:334] "Generic (PLEG): container finished" podID="4831bc39-bb36-411b-a692-bf5d10b12d0d" containerID="e6e503861532259da680c851d30a05e25050ce715d544d54341cb0e054568ee3" exitCode=0 Feb 18 02:09:18 crc kubenswrapper[4791]: I0218 02:09:18.022969 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w6hkj/must-gather-q9txg" event={"ID":"4831bc39-bb36-411b-a692-bf5d10b12d0d","Type":"ContainerDied","Data":"e6e503861532259da680c851d30a05e25050ce715d544d54341cb0e054568ee3"} Feb 18 02:09:18 crc kubenswrapper[4791]: I0218 02:09:18.025451 4791 scope.go:117] "RemoveContainer" containerID="e6e503861532259da680c851d30a05e25050ce715d544d54341cb0e054568ee3" 
Feb 18 02:09:18 crc kubenswrapper[4791]: E0218 02:09:18.067470 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:09:18 crc kubenswrapper[4791]: I0218 02:09:18.224027 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-w6hkj_must-gather-q9txg_4831bc39-bb36-411b-a692-bf5d10b12d0d/gather/0.log" Feb 18 02:09:20 crc kubenswrapper[4791]: E0218 02:09:20.063602 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:09:25 crc kubenswrapper[4791]: I0218 02:09:25.772424 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-w6hkj/must-gather-q9txg"] Feb 18 02:09:25 crc kubenswrapper[4791]: I0218 02:09:25.773483 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-w6hkj/must-gather-q9txg" podUID="4831bc39-bb36-411b-a692-bf5d10b12d0d" containerName="copy" containerID="cri-o://7a18e37ae395a6fceb0d6d557e26b5279ee75a1edb13c8dd0976d4768eccf9cd" gracePeriod=2 Feb 18 02:09:25 crc kubenswrapper[4791]: I0218 02:09:25.785467 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-w6hkj/must-gather-q9txg"] Feb 18 02:09:26 crc kubenswrapper[4791]: I0218 02:09:26.864349 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-w6hkj_must-gather-q9txg_4831bc39-bb36-411b-a692-bf5d10b12d0d/copy/0.log" Feb 18 02:09:26 crc kubenswrapper[4791]: I0218 02:09:26.865279 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w6hkj/must-gather-q9txg" Feb 18 02:09:26 crc kubenswrapper[4791]: I0218 02:09:26.967117 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/4831bc39-bb36-411b-a692-bf5d10b12d0d-must-gather-output\") pod \"4831bc39-bb36-411b-a692-bf5d10b12d0d\" (UID: \"4831bc39-bb36-411b-a692-bf5d10b12d0d\") " Feb 18 02:09:26 crc kubenswrapper[4791]: I0218 02:09:26.967203 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9bgjl\" (UniqueName: \"kubernetes.io/projected/4831bc39-bb36-411b-a692-bf5d10b12d0d-kube-api-access-9bgjl\") pod \"4831bc39-bb36-411b-a692-bf5d10b12d0d\" (UID: \"4831bc39-bb36-411b-a692-bf5d10b12d0d\") " Feb 18 02:09:26 crc kubenswrapper[4791]: I0218 02:09:26.974944 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4831bc39-bb36-411b-a692-bf5d10b12d0d-kube-api-access-9bgjl" (OuterVolumeSpecName: "kube-api-access-9bgjl") pod "4831bc39-bb36-411b-a692-bf5d10b12d0d" (UID: "4831bc39-bb36-411b-a692-bf5d10b12d0d"). InnerVolumeSpecName "kube-api-access-9bgjl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 02:09:27 crc kubenswrapper[4791]: I0218 02:09:27.070118 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9bgjl\" (UniqueName: \"kubernetes.io/projected/4831bc39-bb36-411b-a692-bf5d10b12d0d-kube-api-access-9bgjl\") on node \"crc\" DevicePath \"\"" Feb 18 02:09:27 crc kubenswrapper[4791]: I0218 02:09:27.122326 4791 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-w6hkj_must-gather-q9txg_4831bc39-bb36-411b-a692-bf5d10b12d0d/copy/0.log" Feb 18 02:09:27 crc kubenswrapper[4791]: I0218 02:09:27.127466 4791 generic.go:334] "Generic (PLEG): container finished" podID="4831bc39-bb36-411b-a692-bf5d10b12d0d" containerID="7a18e37ae395a6fceb0d6d557e26b5279ee75a1edb13c8dd0976d4768eccf9cd" exitCode=143 Feb 18 02:09:27 crc kubenswrapper[4791]: I0218 02:09:27.127517 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w6hkj/must-gather-q9txg" Feb 18 02:09:27 crc kubenswrapper[4791]: I0218 02:09:27.127538 4791 scope.go:117] "RemoveContainer" containerID="7a18e37ae395a6fceb0d6d557e26b5279ee75a1edb13c8dd0976d4768eccf9cd" Feb 18 02:09:27 crc kubenswrapper[4791]: I0218 02:09:27.148538 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4831bc39-bb36-411b-a692-bf5d10b12d0d-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "4831bc39-bb36-411b-a692-bf5d10b12d0d" (UID: "4831bc39-bb36-411b-a692-bf5d10b12d0d"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 02:09:27 crc kubenswrapper[4791]: I0218 02:09:27.172374 4791 scope.go:117] "RemoveContainer" containerID="e6e503861532259da680c851d30a05e25050ce715d544d54341cb0e054568ee3" Feb 18 02:09:27 crc kubenswrapper[4791]: I0218 02:09:27.175046 4791 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/4831bc39-bb36-411b-a692-bf5d10b12d0d-must-gather-output\") on node \"crc\" DevicePath \"\"" Feb 18 02:09:27 crc kubenswrapper[4791]: I0218 02:09:27.232454 4791 scope.go:117] "RemoveContainer" containerID="7a18e37ae395a6fceb0d6d557e26b5279ee75a1edb13c8dd0976d4768eccf9cd" Feb 18 02:09:27 crc kubenswrapper[4791]: E0218 02:09:27.232988 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7a18e37ae395a6fceb0d6d557e26b5279ee75a1edb13c8dd0976d4768eccf9cd\": container with ID starting with 7a18e37ae395a6fceb0d6d557e26b5279ee75a1edb13c8dd0976d4768eccf9cd not found: ID does not exist" containerID="7a18e37ae395a6fceb0d6d557e26b5279ee75a1edb13c8dd0976d4768eccf9cd" Feb 18 02:09:27 crc kubenswrapper[4791]: I0218 02:09:27.233036 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7a18e37ae395a6fceb0d6d557e26b5279ee75a1edb13c8dd0976d4768eccf9cd"} err="failed to get container status \"7a18e37ae395a6fceb0d6d557e26b5279ee75a1edb13c8dd0976d4768eccf9cd\": rpc error: code = NotFound desc = could not find container \"7a18e37ae395a6fceb0d6d557e26b5279ee75a1edb13c8dd0976d4768eccf9cd\": container with ID starting with 7a18e37ae395a6fceb0d6d557e26b5279ee75a1edb13c8dd0976d4768eccf9cd not found: ID does not exist" Feb 18 02:09:27 crc kubenswrapper[4791]: I0218 02:09:27.233082 4791 scope.go:117] "RemoveContainer" containerID="e6e503861532259da680c851d30a05e25050ce715d544d54341cb0e054568ee3" Feb 18 02:09:27 crc 
kubenswrapper[4791]: E0218 02:09:27.233375 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e6e503861532259da680c851d30a05e25050ce715d544d54341cb0e054568ee3\": container with ID starting with e6e503861532259da680c851d30a05e25050ce715d544d54341cb0e054568ee3 not found: ID does not exist" containerID="e6e503861532259da680c851d30a05e25050ce715d544d54341cb0e054568ee3" Feb 18 02:09:27 crc kubenswrapper[4791]: I0218 02:09:27.233401 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e6e503861532259da680c851d30a05e25050ce715d544d54341cb0e054568ee3"} err="failed to get container status \"e6e503861532259da680c851d30a05e25050ce715d544d54341cb0e054568ee3\": rpc error: code = NotFound desc = could not find container \"e6e503861532259da680c851d30a05e25050ce715d544d54341cb0e054568ee3\": container with ID starting with e6e503861532259da680c851d30a05e25050ce715d544d54341cb0e054568ee3 not found: ID does not exist" Feb 18 02:09:29 crc kubenswrapper[4791]: I0218 02:09:29.074417 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4831bc39-bb36-411b-a692-bf5d10b12d0d" path="/var/lib/kubelet/pods/4831bc39-bb36-411b-a692-bf5d10b12d0d/volumes" Feb 18 02:09:33 crc kubenswrapper[4791]: E0218 02:09:33.063445 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:09:35 crc kubenswrapper[4791]: E0218 02:09:35.064180 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:09:35 crc kubenswrapper[4791]: I0218 02:09:35.362031 4791 scope.go:117] "RemoveContainer" containerID="c0fdeb6fbfcaeb281f0b4259e0c082b443aeece5503f3230b10bb9363184534d" Feb 18 02:09:35 crc kubenswrapper[4791]: I0218 02:09:35.392143 4791 scope.go:117] "RemoveContainer" containerID="b7eff96e24e9dc0ae8532eccb2d78fcd11c5e2fe83a0f36ff2304fd45a9597a9" Feb 18 02:09:35 crc kubenswrapper[4791]: I0218 02:09:35.475494 4791 scope.go:117] "RemoveContainer" containerID="67f90259a12d576e6b2566e813ad79543be4484d4b163e6096401a9a2f2673cb" Feb 18 02:09:47 crc kubenswrapper[4791]: E0218 02:09:47.065612 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:09:47 crc kubenswrapper[4791]: E0218 02:09:47.065657 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:09:56 crc kubenswrapper[4791]: I0218 02:09:56.800599 4791 patch_prober.go:28] interesting 
pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 02:09:56 crc kubenswrapper[4791]: I0218 02:09:56.801313 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 02:09:59 crc kubenswrapper[4791]: E0218 02:09:59.085347 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:10:01 crc kubenswrapper[4791]: E0218 02:10:01.064304 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:10:15 crc kubenswrapper[4791]: E0218 02:10:15.062834 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:10:15 crc kubenswrapper[4791]: E0218 02:10:15.062838 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:10:26 crc kubenswrapper[4791]: I0218 02:10:26.800260 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 02:10:26 crc kubenswrapper[4791]: I0218 02:10:26.800865 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 02:10:28 crc kubenswrapper[4791]: E0218 02:10:28.066097 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:10:30 crc kubenswrapper[4791]: E0218 02:10:30.063610 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:10:39 crc kubenswrapper[4791]: E0218 02:10:39.071128 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:10:41 crc kubenswrapper[4791]: E0218 02:10:41.063571 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:10:52 crc kubenswrapper[4791]: E0218 02:10:52.063449 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:10:52 crc kubenswrapper[4791]: E0218 02:10:52.063540 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:10:56 crc kubenswrapper[4791]: I0218 02:10:56.799584 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 02:10:56 crc kubenswrapper[4791]: I0218 02:10:56.800237 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 02:10:56 crc kubenswrapper[4791]: I0218 02:10:56.800314 4791 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" Feb 18 02:10:56 crc kubenswrapper[4791]: I0218 02:10:56.801431 4791 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"355003f233a8836345e8bb1d34c34e7a38178d305c6ad2415c31389a63d9f561"} pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 18 02:10:56 crc kubenswrapper[4791]: I0218 02:10:56.801503 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" 
containerID="cri-o://355003f233a8836345e8bb1d34c34e7a38178d305c6ad2415c31389a63d9f561" gracePeriod=600 Feb 18 02:10:57 crc kubenswrapper[4791]: I0218 02:10:57.090702 4791 generic.go:334] "Generic (PLEG): container finished" podID="0b31a333-8f95-459c-8135-e91e557c4c85" containerID="355003f233a8836345e8bb1d34c34e7a38178d305c6ad2415c31389a63d9f561" exitCode=0 Feb 18 02:10:57 crc kubenswrapper[4791]: I0218 02:10:57.090912 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerDied","Data":"355003f233a8836345e8bb1d34c34e7a38178d305c6ad2415c31389a63d9f561"} Feb 18 02:10:57 crc kubenswrapper[4791]: I0218 02:10:57.091109 4791 scope.go:117] "RemoveContainer" containerID="c910b6cc87a9dcc9eddfba49c0ce2c47da01bea9873b2e6847f3f6b997395cc8" Feb 18 02:10:58 crc kubenswrapper[4791]: I0218 02:10:58.118139 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" event={"ID":"0b31a333-8f95-459c-8135-e91e557c4c85","Type":"ContainerStarted","Data":"376c7c04928a9dc22eea1ce3a6f2d925bc8e687d0110097cb53b48a267a9db5e"} Feb 18 02:11:05 crc kubenswrapper[4791]: E0218 02:11:05.064458 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:11:06 crc kubenswrapper[4791]: I0218 02:11:06.260216 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-txstw"] Feb 18 02:11:06 crc kubenswrapper[4791]: E0218 02:11:06.261346 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4831bc39-bb36-411b-a692-bf5d10b12d0d" containerName="copy" Feb 18 02:11:06 crc kubenswrapper[4791]: I0218 02:11:06.261364 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="4831bc39-bb36-411b-a692-bf5d10b12d0d" containerName="copy" Feb 18 02:11:06 crc kubenswrapper[4791]: E0218 02:11:06.261381 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2bdff1e1-fe38-4659-bd28-23ed1297df20" containerName="extract-content" Feb 18 02:11:06 crc kubenswrapper[4791]: I0218 02:11:06.261388 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="2bdff1e1-fe38-4659-bd28-23ed1297df20" containerName="extract-content" Feb 18 02:11:06 crc kubenswrapper[4791]: E0218 02:11:06.261417 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2bdff1e1-fe38-4659-bd28-23ed1297df20" containerName="registry-server" Feb 18 02:11:06 crc kubenswrapper[4791]: I0218 02:11:06.261423 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="2bdff1e1-fe38-4659-bd28-23ed1297df20" containerName="registry-server" Feb 18 02:11:06 crc kubenswrapper[4791]: E0218 02:11:06.261433 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4831bc39-bb36-411b-a692-bf5d10b12d0d" containerName="gather" Feb 18 02:11:06 crc kubenswrapper[4791]: I0218 02:11:06.261440 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="4831bc39-bb36-411b-a692-bf5d10b12d0d" containerName="gather" Feb 18 02:11:06 crc kubenswrapper[4791]: E0218 02:11:06.261456 4791 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2bdff1e1-fe38-4659-bd28-23ed1297df20" containerName="extract-utilities" Feb 18 
02:11:06 crc kubenswrapper[4791]: I0218 02:11:06.261464 4791 state_mem.go:107] "Deleted CPUSet assignment" podUID="2bdff1e1-fe38-4659-bd28-23ed1297df20" containerName="extract-utilities" Feb 18 02:11:06 crc kubenswrapper[4791]: I0218 02:11:06.261713 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="2bdff1e1-fe38-4659-bd28-23ed1297df20" containerName="registry-server" Feb 18 02:11:06 crc kubenswrapper[4791]: I0218 02:11:06.261729 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="4831bc39-bb36-411b-a692-bf5d10b12d0d" containerName="copy" Feb 18 02:11:06 crc kubenswrapper[4791]: I0218 02:11:06.261752 4791 memory_manager.go:354] "RemoveStaleState removing state" podUID="4831bc39-bb36-411b-a692-bf5d10b12d0d" containerName="gather" Feb 18 02:11:06 crc kubenswrapper[4791]: I0218 02:11:06.263954 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-txstw" Feb 18 02:11:06 crc kubenswrapper[4791]: I0218 02:11:06.290895 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-txstw"] Feb 18 02:11:06 crc kubenswrapper[4791]: I0218 02:11:06.383694 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gcmq6\" (UniqueName: \"kubernetes.io/projected/c9986b64-c5fc-455f-b4f5-d71dd51c63f2-kube-api-access-gcmq6\") pod \"certified-operators-txstw\" (UID: \"c9986b64-c5fc-455f-b4f5-d71dd51c63f2\") " pod="openshift-marketplace/certified-operators-txstw" Feb 18 02:11:06 crc kubenswrapper[4791]: I0218 02:11:06.384344 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9986b64-c5fc-455f-b4f5-d71dd51c63f2-catalog-content\") pod \"certified-operators-txstw\" (UID: \"c9986b64-c5fc-455f-b4f5-d71dd51c63f2\") " pod="openshift-marketplace/certified-operators-txstw" Feb 18 02:11:06 crc kubenswrapper[4791]: I0218 02:11:06.384666 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9986b64-c5fc-455f-b4f5-d71dd51c63f2-utilities\") pod \"certified-operators-txstw\" (UID: \"c9986b64-c5fc-455f-b4f5-d71dd51c63f2\") " pod="openshift-marketplace/certified-operators-txstw" Feb 18 02:11:06 crc kubenswrapper[4791]: I0218 02:11:06.488297 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9986b64-c5fc-455f-b4f5-d71dd51c63f2-utilities\") pod \"certified-operators-txstw\" (UID: \"c9986b64-c5fc-455f-b4f5-d71dd51c63f2\") " pod="openshift-marketplace/certified-operators-txstw" Feb 18 02:11:06 crc kubenswrapper[4791]: I0218 02:11:06.488474 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gcmq6\" (UniqueName: \"kubernetes.io/projected/c9986b64-c5fc-455f-b4f5-d71dd51c63f2-kube-api-access-gcmq6\") pod \"certified-operators-txstw\" (UID: \"c9986b64-c5fc-455f-b4f5-d71dd51c63f2\") " pod="openshift-marketplace/certified-operators-txstw" Feb 18 02:11:06 crc kubenswrapper[4791]: I0218 02:11:06.488583 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9986b64-c5fc-455f-b4f5-d71dd51c63f2-catalog-content\") pod \"certified-operators-txstw\" (UID: \"c9986b64-c5fc-455f-b4f5-d71dd51c63f2\") " 
pod="openshift-marketplace/certified-operators-txstw" Feb 18 02:11:06 crc kubenswrapper[4791]: I0218 02:11:06.488975 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9986b64-c5fc-455f-b4f5-d71dd51c63f2-utilities\") pod \"certified-operators-txstw\" (UID: \"c9986b64-c5fc-455f-b4f5-d71dd51c63f2\") " pod="openshift-marketplace/certified-operators-txstw" Feb 18 02:11:06 crc kubenswrapper[4791]: I0218 02:11:06.489099 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9986b64-c5fc-455f-b4f5-d71dd51c63f2-catalog-content\") pod \"certified-operators-txstw\" (UID: \"c9986b64-c5fc-455f-b4f5-d71dd51c63f2\") " pod="openshift-marketplace/certified-operators-txstw" Feb 18 02:11:06 crc kubenswrapper[4791]: I0218 02:11:06.518424 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gcmq6\" (UniqueName: \"kubernetes.io/projected/c9986b64-c5fc-455f-b4f5-d71dd51c63f2-kube-api-access-gcmq6\") pod \"certified-operators-txstw\" (UID: \"c9986b64-c5fc-455f-b4f5-d71dd51c63f2\") " pod="openshift-marketplace/certified-operators-txstw" Feb 18 02:11:06 crc kubenswrapper[4791]: I0218 02:11:06.644790 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-txstw" Feb 18 02:11:07 crc kubenswrapper[4791]: E0218 02:11:07.063756 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:11:07 crc kubenswrapper[4791]: I0218 02:11:07.209009 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-txstw"] Feb 18 02:11:07 crc kubenswrapper[4791]: I0218 02:11:07.245912 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-txstw" event={"ID":"c9986b64-c5fc-455f-b4f5-d71dd51c63f2","Type":"ContainerStarted","Data":"cf9ec7636dc1b0a8f8f0ac263480aa0c324d54563821eb7569caa03a69b42bbf"} Feb 18 02:11:08 crc kubenswrapper[4791]: I0218 02:11:08.262515 4791 generic.go:334] "Generic (PLEG): container finished" podID="c9986b64-c5fc-455f-b4f5-d71dd51c63f2" containerID="cebd69712060716e45df01a72c8a871d988c164ddf444b6784b56f40fc67b8b7" exitCode=0 Feb 18 02:11:08 crc kubenswrapper[4791]: I0218 02:11:08.262744 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-txstw" event={"ID":"c9986b64-c5fc-455f-b4f5-d71dd51c63f2","Type":"ContainerDied","Data":"cebd69712060716e45df01a72c8a871d988c164ddf444b6784b56f40fc67b8b7"} Feb 18 02:11:09 crc kubenswrapper[4791]: I0218 02:11:09.279811 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-txstw" event={"ID":"c9986b64-c5fc-455f-b4f5-d71dd51c63f2","Type":"ContainerStarted","Data":"450e02812412ec7999c2ab88f828df4f0b973d693b0242654a97ab5f8d2f97ed"} Feb 18 02:11:11 crc kubenswrapper[4791]: I0218 02:11:11.313226 4791 generic.go:334] "Generic (PLEG): container finished" podID="c9986b64-c5fc-455f-b4f5-d71dd51c63f2" containerID="450e02812412ec7999c2ab88f828df4f0b973d693b0242654a97ab5f8d2f97ed" exitCode=0 Feb 18 02:11:11 crc kubenswrapper[4791]: I0218 02:11:11.313459 4791 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-txstw" event={"ID":"c9986b64-c5fc-455f-b4f5-d71dd51c63f2","Type":"ContainerDied","Data":"450e02812412ec7999c2ab88f828df4f0b973d693b0242654a97ab5f8d2f97ed"} Feb 18 02:11:12 crc kubenswrapper[4791]: I0218 02:11:12.326905 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-txstw" event={"ID":"c9986b64-c5fc-455f-b4f5-d71dd51c63f2","Type":"ContainerStarted","Data":"a16780adafaf7de74953b817cb9e9d44efbead529141085998743690d43d33cd"} Feb 18 02:11:12 crc kubenswrapper[4791]: I0218 02:11:12.360880 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-txstw" podStartSLOduration=2.874370753 podStartE2EDuration="6.36086011s" podCreationTimestamp="2026-02-18 02:11:06 +0000 UTC" firstStartedPulling="2026-02-18 02:11:08.266365607 +0000 UTC m=+5809.834378777" lastFinishedPulling="2026-02-18 02:11:11.752854954 +0000 UTC m=+5813.320868134" observedRunningTime="2026-02-18 02:11:12.349570992 +0000 UTC m=+5813.917584182" watchObservedRunningTime="2026-02-18 02:11:12.36086011 +0000 UTC m=+5813.928873280" Feb 18 02:11:12 crc kubenswrapper[4791]: I0218 02:11:12.846947 4791 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-llvmr"] Feb 18 02:11:12 crc kubenswrapper[4791]: I0218 02:11:12.850004 4791 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-llvmr" Feb 18 02:11:12 crc kubenswrapper[4791]: I0218 02:11:12.863208 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-llvmr"] Feb 18 02:11:12 crc kubenswrapper[4791]: I0218 02:11:12.991212 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gxznv\" (UniqueName: \"kubernetes.io/projected/88180350-bdaf-41fa-9333-ce46ca319e87-kube-api-access-gxznv\") pod \"redhat-operators-llvmr\" (UID: \"88180350-bdaf-41fa-9333-ce46ca319e87\") " pod="openshift-marketplace/redhat-operators-llvmr" Feb 18 02:11:12 crc kubenswrapper[4791]: I0218 02:11:12.991439 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88180350-bdaf-41fa-9333-ce46ca319e87-utilities\") pod \"redhat-operators-llvmr\" (UID: \"88180350-bdaf-41fa-9333-ce46ca319e87\") " pod="openshift-marketplace/redhat-operators-llvmr" Feb 18 02:11:12 crc kubenswrapper[4791]: I0218 02:11:12.991977 4791 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88180350-bdaf-41fa-9333-ce46ca319e87-catalog-content\") pod \"redhat-operators-llvmr\" (UID: \"88180350-bdaf-41fa-9333-ce46ca319e87\") " pod="openshift-marketplace/redhat-operators-llvmr" Feb 18 02:11:13 crc kubenswrapper[4791]: I0218 02:11:13.095680 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88180350-bdaf-41fa-9333-ce46ca319e87-utilities\") pod \"redhat-operators-llvmr\" (UID: \"88180350-bdaf-41fa-9333-ce46ca319e87\") " pod="openshift-marketplace/redhat-operators-llvmr" Feb 18 02:11:13 crc kubenswrapper[4791]: I0218 02:11:13.095935 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/88180350-bdaf-41fa-9333-ce46ca319e87-catalog-content\") pod \"redhat-operators-llvmr\" (UID: \"88180350-bdaf-41fa-9333-ce46ca319e87\") " pod="openshift-marketplace/redhat-operators-llvmr" Feb 18 02:11:13 crc kubenswrapper[4791]: I0218 02:11:13.096010 4791 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gxznv\" (UniqueName: \"kubernetes.io/projected/88180350-bdaf-41fa-9333-ce46ca319e87-kube-api-access-gxznv\") pod \"redhat-operators-llvmr\" (UID: \"88180350-bdaf-41fa-9333-ce46ca319e87\") " pod="openshift-marketplace/redhat-operators-llvmr" Feb 18 02:11:13 crc kubenswrapper[4791]: I0218 02:11:13.097071 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88180350-bdaf-41fa-9333-ce46ca319e87-utilities\") pod \"redhat-operators-llvmr\" (UID: \"88180350-bdaf-41fa-9333-ce46ca319e87\") " pod="openshift-marketplace/redhat-operators-llvmr" Feb 18 02:11:13 crc kubenswrapper[4791]: I0218 02:11:13.097429 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88180350-bdaf-41fa-9333-ce46ca319e87-catalog-content\") pod \"redhat-operators-llvmr\" (UID: \"88180350-bdaf-41fa-9333-ce46ca319e87\") " pod="openshift-marketplace/redhat-operators-llvmr" Feb 18 02:11:13 crc kubenswrapper[4791]: I0218 02:11:13.123492 4791 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gxznv\" (UniqueName: \"kubernetes.io/projected/88180350-bdaf-41fa-9333-ce46ca319e87-kube-api-access-gxznv\") pod \"redhat-operators-llvmr\" (UID: \"88180350-bdaf-41fa-9333-ce46ca319e87\") " pod="openshift-marketplace/redhat-operators-llvmr" Feb 18 02:11:13 crc kubenswrapper[4791]: I0218 02:11:13.199883 4791 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-llvmr" Feb 18 02:11:13 crc kubenswrapper[4791]: I0218 02:11:13.755833 4791 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-llvmr"] Feb 18 02:11:13 crc kubenswrapper[4791]: W0218 02:11:13.756289 4791 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod88180350_bdaf_41fa_9333_ce46ca319e87.slice/crio-ea75230956824ab508e884609e246f66f224bcb252775953fa0809dc714a78ed WatchSource:0}: Error finding container ea75230956824ab508e884609e246f66f224bcb252775953fa0809dc714a78ed: Status 404 returned error can't find the container with id ea75230956824ab508e884609e246f66f224bcb252775953fa0809dc714a78ed Feb 18 02:11:14 crc kubenswrapper[4791]: I0218 02:11:14.351127 4791 generic.go:334] "Generic (PLEG): container finished" podID="88180350-bdaf-41fa-9333-ce46ca319e87" containerID="fb40d1dd9b22e95e97b9d92b330ebc2997c5aba701a708a267b60630b5fc1bfe" exitCode=0 Feb 18 02:11:14 crc kubenswrapper[4791]: I0218 02:11:14.351204 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-llvmr" event={"ID":"88180350-bdaf-41fa-9333-ce46ca319e87","Type":"ContainerDied","Data":"fb40d1dd9b22e95e97b9d92b330ebc2997c5aba701a708a267b60630b5fc1bfe"} Feb 18 02:11:14 crc kubenswrapper[4791]: I0218 02:11:14.351468 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-llvmr" event={"ID":"88180350-bdaf-41fa-9333-ce46ca319e87","Type":"ContainerStarted","Data":"ea75230956824ab508e884609e246f66f224bcb252775953fa0809dc714a78ed"} Feb 18 02:11:16 crc kubenswrapper[4791]: I0218 02:11:16.645548 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-txstw" Feb 18 02:11:16 crc kubenswrapper[4791]: I0218 02:11:16.646250 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-txstw" Feb 18 02:11:16 crc kubenswrapper[4791]: I0218 02:11:16.734527 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-txstw" Feb 18 02:11:17 crc kubenswrapper[4791]: I0218 02:11:17.447557 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-txstw" Feb 18 02:11:19 crc kubenswrapper[4791]: E0218 02:11:19.078571 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:11:19 crc kubenswrapper[4791]: E0218 02:11:19.079304 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:11:19 crc kubenswrapper[4791]: I0218 02:11:19.819192 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-txstw"] Feb 18 02:11:19 crc kubenswrapper[4791]: I0218 02:11:19.819819 4791 kuberuntime_container.go:808] "Killing container with a grace 
period" pod="openshift-marketplace/certified-operators-txstw" podUID="c9986b64-c5fc-455f-b4f5-d71dd51c63f2" containerName="registry-server" containerID="cri-o://a16780adafaf7de74953b817cb9e9d44efbead529141085998743690d43d33cd" gracePeriod=2 Feb 18 02:11:20 crc kubenswrapper[4791]: I0218 02:11:20.446078 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-llvmr" event={"ID":"88180350-bdaf-41fa-9333-ce46ca319e87","Type":"ContainerStarted","Data":"90c868a7fd911f8a1c25b0fe11e53aa2529581e9a46f092e6cfc12c6112e2459"} Feb 18 02:11:20 crc kubenswrapper[4791]: I0218 02:11:20.450854 4791 generic.go:334] "Generic (PLEG): container finished" podID="c9986b64-c5fc-455f-b4f5-d71dd51c63f2" containerID="a16780adafaf7de74953b817cb9e9d44efbead529141085998743690d43d33cd" exitCode=0 Feb 18 02:11:20 crc kubenswrapper[4791]: I0218 02:11:20.450951 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-txstw" event={"ID":"c9986b64-c5fc-455f-b4f5-d71dd51c63f2","Type":"ContainerDied","Data":"a16780adafaf7de74953b817cb9e9d44efbead529141085998743690d43d33cd"} Feb 18 02:11:20 crc kubenswrapper[4791]: I0218 02:11:20.848710 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-txstw" Feb 18 02:11:20 crc kubenswrapper[4791]: I0218 02:11:20.916579 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gcmq6\" (UniqueName: \"kubernetes.io/projected/c9986b64-c5fc-455f-b4f5-d71dd51c63f2-kube-api-access-gcmq6\") pod \"c9986b64-c5fc-455f-b4f5-d71dd51c63f2\" (UID: \"c9986b64-c5fc-455f-b4f5-d71dd51c63f2\") " Feb 18 02:11:20 crc kubenswrapper[4791]: I0218 02:11:20.917127 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9986b64-c5fc-455f-b4f5-d71dd51c63f2-utilities\") pod \"c9986b64-c5fc-455f-b4f5-d71dd51c63f2\" (UID: \"c9986b64-c5fc-455f-b4f5-d71dd51c63f2\") " Feb 18 02:11:20 crc kubenswrapper[4791]: I0218 02:11:20.917458 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9986b64-c5fc-455f-b4f5-d71dd51c63f2-catalog-content\") pod \"c9986b64-c5fc-455f-b4f5-d71dd51c63f2\" (UID: \"c9986b64-c5fc-455f-b4f5-d71dd51c63f2\") " Feb 18 02:11:20 crc kubenswrapper[4791]: I0218 02:11:20.921246 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c9986b64-c5fc-455f-b4f5-d71dd51c63f2-utilities" (OuterVolumeSpecName: "utilities") pod "c9986b64-c5fc-455f-b4f5-d71dd51c63f2" (UID: "c9986b64-c5fc-455f-b4f5-d71dd51c63f2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 02:11:20 crc kubenswrapper[4791]: I0218 02:11:20.924867 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9986b64-c5fc-455f-b4f5-d71dd51c63f2-kube-api-access-gcmq6" (OuterVolumeSpecName: "kube-api-access-gcmq6") pod "c9986b64-c5fc-455f-b4f5-d71dd51c63f2" (UID: "c9986b64-c5fc-455f-b4f5-d71dd51c63f2"). InnerVolumeSpecName "kube-api-access-gcmq6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 02:11:20 crc kubenswrapper[4791]: I0218 02:11:20.982752 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c9986b64-c5fc-455f-b4f5-d71dd51c63f2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c9986b64-c5fc-455f-b4f5-d71dd51c63f2" (UID: "c9986b64-c5fc-455f-b4f5-d71dd51c63f2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 02:11:21 crc kubenswrapper[4791]: I0218 02:11:21.022999 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gcmq6\" (UniqueName: \"kubernetes.io/projected/c9986b64-c5fc-455f-b4f5-d71dd51c63f2-kube-api-access-gcmq6\") on node \"crc\" DevicePath \"\"" Feb 18 02:11:21 crc kubenswrapper[4791]: I0218 02:11:21.023050 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9986b64-c5fc-455f-b4f5-d71dd51c63f2-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 02:11:21 crc kubenswrapper[4791]: I0218 02:11:21.023070 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9986b64-c5fc-455f-b4f5-d71dd51c63f2-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 02:11:21 crc kubenswrapper[4791]: I0218 02:11:21.462947 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-txstw" event={"ID":"c9986b64-c5fc-455f-b4f5-d71dd51c63f2","Type":"ContainerDied","Data":"cf9ec7636dc1b0a8f8f0ac263480aa0c324d54563821eb7569caa03a69b42bbf"} Feb 18 02:11:21 crc kubenswrapper[4791]: I0218 02:11:21.463019 4791 scope.go:117] "RemoveContainer" containerID="a16780adafaf7de74953b817cb9e9d44efbead529141085998743690d43d33cd" Feb 18 02:11:21 crc kubenswrapper[4791]: I0218 02:11:21.463039 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-txstw" Feb 18 02:11:21 crc kubenswrapper[4791]: I0218 02:11:21.491012 4791 scope.go:117] "RemoveContainer" containerID="450e02812412ec7999c2ab88f828df4f0b973d693b0242654a97ab5f8d2f97ed" Feb 18 02:11:21 crc kubenswrapper[4791]: I0218 02:11:21.500076 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-txstw"] Feb 18 02:11:21 crc kubenswrapper[4791]: I0218 02:11:21.511459 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-txstw"] Feb 18 02:11:21 crc kubenswrapper[4791]: I0218 02:11:21.516757 4791 scope.go:117] "RemoveContainer" containerID="cebd69712060716e45df01a72c8a871d988c164ddf444b6784b56f40fc67b8b7" Feb 18 02:11:23 crc kubenswrapper[4791]: I0218 02:11:23.072919 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c9986b64-c5fc-455f-b4f5-d71dd51c63f2" path="/var/lib/kubelet/pods/c9986b64-c5fc-455f-b4f5-d71dd51c63f2/volumes" Feb 18 02:11:24 crc kubenswrapper[4791]: I0218 02:11:24.511077 4791 generic.go:334] "Generic (PLEG): container finished" podID="88180350-bdaf-41fa-9333-ce46ca319e87" containerID="90c868a7fd911f8a1c25b0fe11e53aa2529581e9a46f092e6cfc12c6112e2459" exitCode=0 Feb 18 02:11:24 crc kubenswrapper[4791]: I0218 02:11:24.511148 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-llvmr" event={"ID":"88180350-bdaf-41fa-9333-ce46ca319e87","Type":"ContainerDied","Data":"90c868a7fd911f8a1c25b0fe11e53aa2529581e9a46f092e6cfc12c6112e2459"} Feb 18 02:11:25 crc kubenswrapper[4791]: I0218 02:11:25.521751 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-llvmr" event={"ID":"88180350-bdaf-41fa-9333-ce46ca319e87","Type":"ContainerStarted","Data":"9e7405684aa09cc7eaf7fc3a2761e45d869b593387f964aadb97bb12158acae2"} Feb 18 02:11:25 crc kubenswrapper[4791]: I0218 02:11:25.557061 4791 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-llvmr" podStartSLOduration=3.012891827 podStartE2EDuration="13.557039827s" podCreationTimestamp="2026-02-18 02:11:12 +0000 UTC" firstStartedPulling="2026-02-18 02:11:14.35288433 +0000 UTC m=+5815.920897510" lastFinishedPulling="2026-02-18 02:11:24.8970323 +0000 UTC m=+5826.465045510" observedRunningTime="2026-02-18 02:11:25.541151848 +0000 UTC m=+5827.109165018" watchObservedRunningTime="2026-02-18 02:11:25.557039827 +0000 UTC m=+5827.125052987" Feb 18 02:11:30 crc kubenswrapper[4791]: E0218 02:11:30.063688 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:11:31 crc kubenswrapper[4791]: E0218 02:11:31.064360 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:11:33 crc kubenswrapper[4791]: I0218 02:11:33.200303 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-llvmr" Feb 18 
02:11:33 crc kubenswrapper[4791]: I0218 02:11:33.200891 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-llvmr" Feb 18 02:11:34 crc kubenswrapper[4791]: I0218 02:11:34.245855 4791 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-llvmr" podUID="88180350-bdaf-41fa-9333-ce46ca319e87" containerName="registry-server" probeResult="failure" output=< Feb 18 02:11:34 crc kubenswrapper[4791]: timeout: failed to connect service ":50051" within 1s Feb 18 02:11:34 crc kubenswrapper[4791]: > Feb 18 02:11:42 crc kubenswrapper[4791]: E0218 02:11:42.063993 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:11:43 crc kubenswrapper[4791]: E0218 02:11:43.063798 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:11:43 crc kubenswrapper[4791]: I0218 02:11:43.247593 4791 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-llvmr" Feb 18 02:11:43 crc kubenswrapper[4791]: I0218 02:11:43.313124 4791 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-llvmr" Feb 18 02:11:44 crc kubenswrapper[4791]: I0218 02:11:44.035297 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-llvmr"] Feb 18 02:11:44 crc kubenswrapper[4791]: I0218 02:11:44.762382 4791 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-llvmr" podUID="88180350-bdaf-41fa-9333-ce46ca319e87" containerName="registry-server" containerID="cri-o://9e7405684aa09cc7eaf7fc3a2761e45d869b593387f964aadb97bb12158acae2" gracePeriod=2 Feb 18 02:11:45 crc kubenswrapper[4791]: I0218 02:11:45.294483 4791 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-llvmr" Feb 18 02:11:45 crc kubenswrapper[4791]: I0218 02:11:45.390474 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88180350-bdaf-41fa-9333-ce46ca319e87-catalog-content\") pod \"88180350-bdaf-41fa-9333-ce46ca319e87\" (UID: \"88180350-bdaf-41fa-9333-ce46ca319e87\") " Feb 18 02:11:45 crc kubenswrapper[4791]: I0218 02:11:45.390594 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gxznv\" (UniqueName: \"kubernetes.io/projected/88180350-bdaf-41fa-9333-ce46ca319e87-kube-api-access-gxznv\") pod \"88180350-bdaf-41fa-9333-ce46ca319e87\" (UID: \"88180350-bdaf-41fa-9333-ce46ca319e87\") " Feb 18 02:11:45 crc kubenswrapper[4791]: I0218 02:11:45.390638 4791 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88180350-bdaf-41fa-9333-ce46ca319e87-utilities\") pod \"88180350-bdaf-41fa-9333-ce46ca319e87\" (UID: \"88180350-bdaf-41fa-9333-ce46ca319e87\") " Feb 18 02:11:45 crc kubenswrapper[4791]: I0218 02:11:45.392311 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/88180350-bdaf-41fa-9333-ce46ca319e87-utilities" (OuterVolumeSpecName: "utilities") pod "88180350-bdaf-41fa-9333-ce46ca319e87" (UID: "88180350-bdaf-41fa-9333-ce46ca319e87"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 02:11:45 crc kubenswrapper[4791]: I0218 02:11:45.411984 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/88180350-bdaf-41fa-9333-ce46ca319e87-kube-api-access-gxznv" (OuterVolumeSpecName: "kube-api-access-gxznv") pod "88180350-bdaf-41fa-9333-ce46ca319e87" (UID: "88180350-bdaf-41fa-9333-ce46ca319e87"). InnerVolumeSpecName "kube-api-access-gxznv". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 18 02:11:45 crc kubenswrapper[4791]: I0218 02:11:45.494116 4791 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gxznv\" (UniqueName: \"kubernetes.io/projected/88180350-bdaf-41fa-9333-ce46ca319e87-kube-api-access-gxznv\") on node \"crc\" DevicePath \"\"" Feb 18 02:11:45 crc kubenswrapper[4791]: I0218 02:11:45.494386 4791 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88180350-bdaf-41fa-9333-ce46ca319e87-utilities\") on node \"crc\" DevicePath \"\"" Feb 18 02:11:45 crc kubenswrapper[4791]: I0218 02:11:45.526632 4791 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/88180350-bdaf-41fa-9333-ce46ca319e87-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "88180350-bdaf-41fa-9333-ce46ca319e87" (UID: "88180350-bdaf-41fa-9333-ce46ca319e87"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 18 02:11:45 crc kubenswrapper[4791]: I0218 02:11:45.596605 4791 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88180350-bdaf-41fa-9333-ce46ca319e87-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 18 02:11:45 crc kubenswrapper[4791]: I0218 02:11:45.774788 4791 generic.go:334] "Generic (PLEG): container finished" podID="88180350-bdaf-41fa-9333-ce46ca319e87" containerID="9e7405684aa09cc7eaf7fc3a2761e45d869b593387f964aadb97bb12158acae2" exitCode=0 Feb 18 02:11:45 crc kubenswrapper[4791]: I0218 02:11:45.774840 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-llvmr" event={"ID":"88180350-bdaf-41fa-9333-ce46ca319e87","Type":"ContainerDied","Data":"9e7405684aa09cc7eaf7fc3a2761e45d869b593387f964aadb97bb12158acae2"} Feb 18 02:11:45 crc kubenswrapper[4791]: I0218 02:11:45.774876 4791 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-llvmr" event={"ID":"88180350-bdaf-41fa-9333-ce46ca319e87","Type":"ContainerDied","Data":"ea75230956824ab508e884609e246f66f224bcb252775953fa0809dc714a78ed"} Feb 18 02:11:45 crc kubenswrapper[4791]: I0218 02:11:45.774897 4791 scope.go:117] "RemoveContainer" containerID="9e7405684aa09cc7eaf7fc3a2761e45d869b593387f964aadb97bb12158acae2" Feb 18 02:11:45 crc kubenswrapper[4791]: I0218 02:11:45.775805 4791 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-llvmr" Feb 18 02:11:45 crc kubenswrapper[4791]: I0218 02:11:45.799207 4791 scope.go:117] "RemoveContainer" containerID="90c868a7fd911f8a1c25b0fe11e53aa2529581e9a46f092e6cfc12c6112e2459" Feb 18 02:11:45 crc kubenswrapper[4791]: I0218 02:11:45.820522 4791 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-llvmr"] Feb 18 02:11:45 crc kubenswrapper[4791]: I0218 02:11:45.833167 4791 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-llvmr"] Feb 18 02:11:45 crc kubenswrapper[4791]: I0218 02:11:45.841697 4791 scope.go:117] "RemoveContainer" containerID="fb40d1dd9b22e95e97b9d92b330ebc2997c5aba701a708a267b60630b5fc1bfe" Feb 18 02:11:45 crc kubenswrapper[4791]: I0218 02:11:45.874465 4791 scope.go:117] "RemoveContainer" containerID="9e7405684aa09cc7eaf7fc3a2761e45d869b593387f964aadb97bb12158acae2" Feb 18 02:11:45 crc kubenswrapper[4791]: E0218 02:11:45.874801 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9e7405684aa09cc7eaf7fc3a2761e45d869b593387f964aadb97bb12158acae2\": container with ID starting with 9e7405684aa09cc7eaf7fc3a2761e45d869b593387f964aadb97bb12158acae2 not found: ID does not exist" containerID="9e7405684aa09cc7eaf7fc3a2761e45d869b593387f964aadb97bb12158acae2" Feb 18 02:11:45 crc kubenswrapper[4791]: I0218 02:11:45.874832 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e7405684aa09cc7eaf7fc3a2761e45d869b593387f964aadb97bb12158acae2"} err="failed to get container status \"9e7405684aa09cc7eaf7fc3a2761e45d869b593387f964aadb97bb12158acae2\": rpc error: code = NotFound desc = could not find container \"9e7405684aa09cc7eaf7fc3a2761e45d869b593387f964aadb97bb12158acae2\": container with ID starting with 9e7405684aa09cc7eaf7fc3a2761e45d869b593387f964aadb97bb12158acae2 not found: ID does not exist" Feb 18 02:11:45 crc 
kubenswrapper[4791]: I0218 02:11:45.874853 4791 scope.go:117] "RemoveContainer" containerID="90c868a7fd911f8a1c25b0fe11e53aa2529581e9a46f092e6cfc12c6112e2459" Feb 18 02:11:45 crc kubenswrapper[4791]: E0218 02:11:45.875115 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"90c868a7fd911f8a1c25b0fe11e53aa2529581e9a46f092e6cfc12c6112e2459\": container with ID starting with 90c868a7fd911f8a1c25b0fe11e53aa2529581e9a46f092e6cfc12c6112e2459 not found: ID does not exist" containerID="90c868a7fd911f8a1c25b0fe11e53aa2529581e9a46f092e6cfc12c6112e2459" Feb 18 02:11:45 crc kubenswrapper[4791]: I0218 02:11:45.875142 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"90c868a7fd911f8a1c25b0fe11e53aa2529581e9a46f092e6cfc12c6112e2459"} err="failed to get container status \"90c868a7fd911f8a1c25b0fe11e53aa2529581e9a46f092e6cfc12c6112e2459\": rpc error: code = NotFound desc = could not find container \"90c868a7fd911f8a1c25b0fe11e53aa2529581e9a46f092e6cfc12c6112e2459\": container with ID starting with 90c868a7fd911f8a1c25b0fe11e53aa2529581e9a46f092e6cfc12c6112e2459 not found: ID does not exist" Feb 18 02:11:45 crc kubenswrapper[4791]: I0218 02:11:45.875169 4791 scope.go:117] "RemoveContainer" containerID="fb40d1dd9b22e95e97b9d92b330ebc2997c5aba701a708a267b60630b5fc1bfe" Feb 18 02:11:45 crc kubenswrapper[4791]: E0218 02:11:45.875518 4791 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fb40d1dd9b22e95e97b9d92b330ebc2997c5aba701a708a267b60630b5fc1bfe\": container with ID starting with fb40d1dd9b22e95e97b9d92b330ebc2997c5aba701a708a267b60630b5fc1bfe not found: ID does not exist" containerID="fb40d1dd9b22e95e97b9d92b330ebc2997c5aba701a708a267b60630b5fc1bfe" Feb 18 02:11:45 crc kubenswrapper[4791]: I0218 02:11:45.875541 4791 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fb40d1dd9b22e95e97b9d92b330ebc2997c5aba701a708a267b60630b5fc1bfe"} err="failed to get container status \"fb40d1dd9b22e95e97b9d92b330ebc2997c5aba701a708a267b60630b5fc1bfe\": rpc error: code = NotFound desc = could not find container \"fb40d1dd9b22e95e97b9d92b330ebc2997c5aba701a708a267b60630b5fc1bfe\": container with ID starting with fb40d1dd9b22e95e97b9d92b330ebc2997c5aba701a708a267b60630b5fc1bfe not found: ID does not exist" Feb 18 02:11:47 crc kubenswrapper[4791]: I0218 02:11:47.078757 4791 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="88180350-bdaf-41fa-9333-ce46ca319e87" path="/var/lib/kubelet/pods/88180350-bdaf-41fa-9333-ce46ca319e87/volumes" Feb 18 02:11:53 crc kubenswrapper[4791]: E0218 02:11:53.065254 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:11:54 crc kubenswrapper[4791]: E0218 02:11:54.063628 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:12:05 crc kubenswrapper[4791]: 
E0218 02:12:05.064027 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:12:05 crc kubenswrapper[4791]: E0218 02:12:05.064613 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:12:19 crc kubenswrapper[4791]: E0218 02:12:19.075922 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:12:20 crc kubenswrapper[4791]: E0218 02:12:20.064474 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:12:33 crc kubenswrapper[4791]: E0218 02:12:33.065967 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:12:34 crc kubenswrapper[4791]: E0218 02:12:34.064093 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:12:46 crc kubenswrapper[4791]: E0218 02:12:46.065193 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:12:48 crc kubenswrapper[4791]: E0218 02:12:48.063586 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:12:58 crc kubenswrapper[4791]: E0218 02:12:58.067324 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" 
podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:13:02 crc kubenswrapper[4791]: E0218 02:13:02.063391 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:13:13 crc kubenswrapper[4791]: E0218 02:13:13.067574 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:13:15 crc kubenswrapper[4791]: I0218 02:13:15.063605 4791 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 18 02:13:15 crc kubenswrapper[4791]: E0218 02:13:15.168001 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 02:13:15 crc kubenswrapper[4791]: E0218 02:13:15.168083 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Feb 18 02:13:15 crc kubenswrapper[4791]: E0218 02:13:15.168281 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lgn7d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-scphk_openstack(68e5e8d6-5771-4045-858a-4a39b2db99f9): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 18 02:13:15 crc kubenswrapper[4791]: E0218 02:13:15.170169 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:13:26 crc kubenswrapper[4791]: E0218 02:13:26.064101 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95" Feb 18 02:13:26 crc kubenswrapper[4791]: I0218 02:13:26.799604 4791 patch_prober.go:28] interesting pod/machine-config-daemon-bhfmv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 18 02:13:26 crc kubenswrapper[4791]: I0218 02:13:26.799932 4791 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-bhfmv" podUID="0b31a333-8f95-459c-8135-e91e557c4c85" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 18 02:13:28 crc kubenswrapper[4791]: E0218 02:13:28.063259 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-scphk" podUID="68e5e8d6-5771-4045-858a-4a39b2db99f9" Feb 18 02:13:37 crc kubenswrapper[4791]: E0218 02:13:37.214269 4791 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 02:13:37 crc kubenswrapper[4791]: E0218 02:13:37.216846 4791 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Feb 18 02:13:37 crc kubenswrapper[4791]: E0218 02:13:37.217332 4791 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndh699h566hcch79h59h595h58bhb6h7fh594h67fh546h668h56hc8h6chd5h94h599h587hddh64bh57bhc9h68fh7dh57bh65dh64fh68dh59dq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-np7gr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(4b9cec47-aeda-40f0-b83e-46f09ce65e95): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Feb 18 02:13:37 crc kubenswrapper[4791]: E0218 02:13:37.218858 4791 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="4b9cec47-aeda-40f0-b83e-46f09ce65e95"